Dataset schema:

    column                   type     range
    code                     string   lengths 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   lengths 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 or 1
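Each record pairs one code sample ("code") with a second sample ("style_context") and labels whether the two share the same coding style. A minimal sketch of loading and inspecting one record with the datasets library; the hub id below is a placeholder, not the dataset's real id:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical id
row = ds[0]
print(row["code"][:200])                # a Python source file stored as one string
print(row["code_codestyle"])            # style id of that sample (0 to 371)
print(row["style_context"][:200])       # the second sample, used as style context
print(row["style_context_codestyle"])   # its style id (0 to 349)
print(row["label"])                     # 1 if the two samples share a style, else 0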
code:

from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # An edge crosses the cut if exactly one endpoint is in the subgraph.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Maximum saving from replacing the network with its minimum spanning tree."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}

    with open(filepath) as f:
        data = f.read().strip().split("\n")
        adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 4
style_context:

class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start:end + 1] in O(1) time."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 4
label: 1
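A quick usage sketch for the two samples in this record, assuming the de-mangled names used above (Graph, prims_algorithm, PrefixSum); the values are chosen for illustration:

edges = {(0, 1): 3, (1, 2): 1, (0, 2): 5}
graph = Graph({0, 1, 2}, edges)
mst = graph.prims_algorithm()
# The MST keeps (0, 1) and (1, 2); dropping (0, 2) saves weight 5.
print(sum(graph.edges.values()) - sum(mst.edges.values()))  # 5

prefix = PrefixSum([1, 2, 3])   # prefix sums: [1, 3, 6]
print(prefix.get_sum(0, 2))     # 6
print(prefix.contains_sum(5))   # True: the subarray [2, 3] sums to 5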
code:

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 369
style_context:

import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
style_context_codestyle: 60
label: 0
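With the lazy module above in place, the CTRL classes import as usual and the heavy torch/TF modules load only on first access. A small sketch (requires torch; the config values are arbitrary small ones for illustration):

from transformers import CTRLConfig, CTRLModel

config = CTRLConfig(n_layer=2, n_head=2, n_embd=64)  # tiny config, illustration only
model = CTRLModel(config)
print(sum(p.numel() for p in model.parameters()))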
code:

def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Iterate up to the length of the longer string.
    longest_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(longest_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
code_codestyle: 192
style_context:

import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
style_context_codestyle: 192
label: 1
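For the interleaver above, alternative_string_arrange("AB", "XYZ") yields "AXBYZ": the loop emits A,X then B,Y and then the leftover Z. The conversion script is meant to be run from the command line; a hedged sketch of calling its function directly instead (the module path and file paths are assumptions for illustration, and TensorFlow must be installed for the weight loading):

from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    "./albert_base/model.ckpt-best",     # TensorFlow checkpoint (placeholder path)
    "./albert_base/albert_config.json",  # ALBERT config JSON (placeholder path)
    "./albert_base/pytorch_model.bin",   # output PyTorch weights (placeholder path)
)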
code:

import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top
    of the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
code_codestyle: 170
style_context: identical to the code sample above.
style_context_codestyle: 170
label: 1
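A hedged sketch of running the PABEE model above with patience-based early exit; the toy input ids are arbitrary and the class names are the de-mangled ones from the sample:

import torch
from transformers import BertConfig

config = BertConfig()
model = BertForSequenceClassificationWithPabee(config)
model.eval()
model.bert.set_patience(3)  # exit once 3 consecutive layers agree on the prediction
model.bert.reset_stats()

input_ids = torch.tensor([[101, 7592, 102]])  # toy token ids
with torch.no_grad():
    logits = model(input_ids=input_ids)[0]
model.bert.log_stats()  # prints average layers used and the resulting speed-up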
code:

import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
code_codestyle: 193
style_context:

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
style_context_codestyle: 93
label: 0
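A note on the MT5 integration test above: the model's loss is the mean cross-entropy per target token, so -(labels.shape[-1] * loss.item()) rescales it to the log-likelihood of the whole target sequence, which is what the Mesh TensorFlow reference score (EXPECTED_SCORE) measures.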
code:

import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
code_codestyle: 356
style_context:

from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
140
0
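A minimal illustrative sketch (not part of the dataset row above) of what the AffineTransformed wrapper in the distribution snippet computes, built only from torch.distributions primitives; the concrete loc/scale values are made up:

# The scaled/shifted distribution's moments follow the affine rules
# mean * scale + loc and variance * scale**2, which is exactly what the
# wrapper's mean/variance/stddev properties return.
import torch
from torch.distributions import AffineTransform, Normal, TransformedDistribution

base = Normal(torch.zeros(4), torch.ones(4))
transformed = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=3.0)])
sample = transformed.sample()          # equals base_sample * 3.0 + 2.0
mean = base.mean * 3.0 + 2.0           # tensor of 2.0
variance = base.variance * 3.0**2      # tensor of 9.0
stddev = variance.sqrt()               # tensor of 3.0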
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class A__ : """simple docstring""" def __init__( self , __snake_case , __snake_case=3 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=True , __snake_case=9_9 , __snake_case=3_2 , __snake_case=5 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=1_6 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ): snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_input_mask snake_case = use_token_type_ids snake_case = use_labels snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = type_sequence_label_size snake_case = initializer_range snake_case = num_labels snake_case = num_choices snake_case = scope def a_ ( self ): snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = None if self.use_input_mask: snake_case = random_attention_mask([self.batch_size, self.seq_length] ) snake_case = None snake_case = None snake_case = None snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case = ids_tensor([self.batch_size] , self.num_choices ) snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ ( self ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCAmelCase__ , ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ): snake_case = FalconModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) snake_case = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): snake_case 
= True snake_case = FalconModel(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) snake_case = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , ) snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): snake_case = FalconForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ): snake_case = True snake_case = True snake_case = FalconForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # first forward pass snake_case = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , ) snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0] snake_case = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0] # select random slice snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def a_ ( self ): snake_case = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) = config_and_inputs snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" __magic_name__ = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = (FalconForCausalLM,) if is_torch_available() else () __magic_name__ = ( { 'feature-extraction': FalconModel, 'text-classification': 
FalconForSequenceClassification, 'text-generation': FalconForCausalLM, 'question-answering': FalconForQuestionAnswering, 'token-classification': FalconForTokenClassification, 'zero-shot': FalconForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False def a_ ( self ): snake_case = FalconModelTester(self ) snake_case = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 ) def a_ ( self ): self.config_tester.run_common_tests() def a_ ( self ): snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def a_ ( self ): snake_case , *snake_case = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: snake_case = alibi self.model_tester.create_and_check_model(lowerCAmelCase__ , *lowerCAmelCase__ ) def a_ ( self ): snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = input_dict['''input_ids'''] snake_case = input_ids.ne(1 ).to(lowerCAmelCase__ ) snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a_ ( self ): snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = '''single_label_classification''' snake_case = input_dict['''input_ids'''] snake_case = input_ids.ne(1 ).to(lowerCAmelCase__ ) snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a_ ( self ): snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = input_dict['''input_ids'''] snake_case = FalconForCausalLM(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) snake_case = input_ids.shape[0] snake_case = model._convert_to_rw_cache(result.past_key_values ) snake_case = model._convert_cache_to_standard_format(lowerCAmelCase__ , lowerCAmelCase__ ) for layer in range(len(lowerCAmelCase__ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def a_ ( self ): snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = '''multi_label_classification''' snake_case = input_dict['''input_ids'''] snake_case = input_ids.ne(1 ).to(lowerCAmelCase__ ) snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) 
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def a_ ( self ): for model_class in self.all_generative_model_classes: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(lowerCAmelCase__ , '''use_cache''' ): return snake_case = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ ) if "use_cache" not in inputs: snake_case = True snake_case = model(**lowerCAmelCase__ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return snake_case = ( getattr(lowerCAmelCase__ , '''decoder_layers''' , lowerCAmelCase__ ) or getattr(lowerCAmelCase__ , '''num_decoder_layers''' , lowerCAmelCase__ ) or config.num_hidden_layers ) snake_case = getattr(lowerCAmelCase__ , '''num_kv_heads''' , config.num_attention_heads ) snake_case = getattr(lowerCAmelCase__ , '''d_model''' , config.hidden_size ) snake_case = embed_dim // num_attention_heads snake_case = outputs['''past_key_values'''] self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) snake_case , snake_case = inputs['''input_ids'''].shape for i in range(lowerCAmelCase__ ): if config.new_decoder_architecture: snake_case = config.num_attention_heads elif config.multi_query: snake_case = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def a_ ( self ): snake_case = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) snake_case = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) model.eval() model.to(lowerCAmelCase__ ) snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ ) snake_case = ( '''My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.''' ) snake_case = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=1_9 ) snake_case = tokenizer.batch_decode(lowerCAmelCase__ )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def a_ ( self ): for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) snake_case = FalconForCausalLM.from_pretrained(lowerCAmelCase__ ) model.eval() model.to(lowerCAmelCase__ ) snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 ) model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 ) model.generate(**lowerCAmelCase__ , num_beams=2 , max_new_tokens=4 ) @slow def a_ ( self ): with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) snake_case = FalconForCausalLM.from_pretrained(lowerCAmelCase__ ) model.eval() model.to(device=lowerCAmelCase__ ) snake_case = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # Test results are the same with and without cache snake_case = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ ) snake_case = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
127
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : str = logging.get_logger(__name__) lowercase__ : Any = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Tuple = 'deformable_detr' _snake_case : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = backbone_config.get('''model_type''' ) _UpperCamelCase = CONFIG_MAPPING[backbone_model_type] _UpperCamelCase = config_class.from_dict(lowerCAmelCase__ ) _UpperCamelCase = use_timm_backbone _UpperCamelCase = backbone_config _UpperCamelCase = num_channels _UpperCamelCase = num_queries _UpperCamelCase = max_position_embeddings _UpperCamelCase = d_model _UpperCamelCase = encoder_ffn_dim _UpperCamelCase = encoder_layers _UpperCamelCase = encoder_attention_heads _UpperCamelCase = decoder_ffn_dim _UpperCamelCase = decoder_layers _UpperCamelCase = decoder_attention_heads _UpperCamelCase = dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = activation_function _UpperCamelCase = init_std _UpperCamelCase = init_xavier_std _UpperCamelCase = encoder_layerdrop _UpperCamelCase = auxiliary_loss _UpperCamelCase = position_embedding_type _UpperCamelCase = backbone _UpperCamelCase = use_pretrained_backbone _UpperCamelCase = dilation # deformable attributes _UpperCamelCase = num_feature_levels _UpperCamelCase = encoder_n_points _UpperCamelCase = decoder_n_points _UpperCamelCase = two_stage _UpperCamelCase = two_stage_num_proposals _UpperCamelCase = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _UpperCamelCase = class_cost _UpperCamelCase = bbox_cost _UpperCamelCase = giou_cost # Loss coefficients _UpperCamelCase = mask_loss_coefficient _UpperCamelCase = dice_loss_coefficient _UpperCamelCase = bbox_loss_coefficient _UpperCamelCase = giou_loss_coefficient _UpperCamelCase = eos_coefficient _UpperCamelCase = focal_alpha _UpperCamelCase = disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def snake_case__ ( self : int ) -> int: '''simple docstring''' return self.d_model def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _UpperCamelCase = self.backbone_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
324
0
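The Falcon tests above assert a shape contract on the returned key/value cache; here is a hedged, self-contained sketch of that check using a tiny randomly initialized config (all sizes are assumptions, and on newer transformers versions the cache may come back as a Cache object rather than tuples):

# Toy config sizes; the assertions mirror what the tests check on
# result.past_key_values: one (K, V) pair per layer, each 4-dimensional.
import torch
from transformers import FalconConfig, FalconForCausalLM

config = FalconConfig(vocab_size=128, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
model = FalconForCausalLM(config).eval()
input_ids = torch.randint(0, 128, (1, 5))
with torch.no_grad():
    outputs = model(input_ids, use_cache=True)
for key, value in outputs.past_key_values:     # one (K, V) pair per layer
    assert key.ndim == 4 and value.ndim == 4   # (batch, kv_heads, seq_len, head_dim)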
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = KandinskyVaaInpaintPipeline __magic_name__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] __magic_name__ = [ '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] __magic_name__ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] __magic_name__ = False @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 32 @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: return 32 @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: return self.time_input_dim @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: return self.time_input_dim * 4 @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: return 100 @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCAmelCase_ : List[Any] = UNetaDConditionModel(**lowerCAmelCase_ ) return model @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs ) return model def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.dummy_unet UpperCAmelCase_ : str = self.dummy_movq UpperCAmelCase_ : List[Any] = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase_ , ) UpperCAmelCase_ : int = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def 
_SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=0 ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) UpperCAmelCase_ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCAmelCase_ ) # create init_image UpperCAmelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : Optional[Any] = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCAmelCase_ : Tuple = np.ones((64, 64) , dtype=np.floataa ) UpperCAmelCase_ : Optional[Any] = 0 if str(lowerCAmelCase_ ).startswith("mps" ): UpperCAmelCase_ : List[str] = torch.manual_seed(lowerCAmelCase_ ) else: UpperCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) UpperCAmelCase_ : int = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Tuple = "cpu" UpperCAmelCase_ : List[Any] = self.get_dummy_components() UpperCAmelCase_ : Tuple = self.pipeline_class(**lowerCAmelCase_ ) UpperCAmelCase_ : int = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : List[Any] = pipe( **self.get_dummy_inputs(lowerCAmelCase_ ) , return_dict=lowerCAmelCase_ , )[0] UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : List[str] = np.array( [0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class UpperCamelCase_ (unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) UpperCAmelCase_ : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCAmelCase_ : str = np.ones((768, 768) , dtype=np.floataa ) UpperCAmelCase_ : Optional[Any] = 0 UpperCAmelCase_ : int = "a hat" UpperCAmelCase_ : Tuple = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(lowerCAmelCase_ ) UpperCAmelCase_ : int = KandinskyVaaInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa ) UpperCAmelCase_ : str = pipeline.to(lowerCAmelCase_ ) pipeline.set_progress_bar_config(disable=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe_prior( lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCAmelCase_ : Dict = pipeline( image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCAmelCase_ : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
355
"""simple docstring""" import numpy as np def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : Tuple = int(np.ceil((x_end - xa) / h ) ) UpperCAmelCase_ : Optional[Any] = np.zeros((n + 1,) ) UpperCAmelCase_ : List[Any] = ya UpperCAmelCase_ : Optional[int] = xa for k in range(A__ ): UpperCAmelCase_ : List[str] = f(A__ ,y[k] ) UpperCAmelCase_ : Any = f(x + 0.5 * h ,y[k] + 0.5 * h * ka ) UpperCAmelCase_ : Union[str, Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka ) UpperCAmelCase_ : Dict = f(x + h ,y[k] + h * ka ) UpperCAmelCase_ : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
253
0
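Since the Runge-Kutta helper above is now runnable, a quick illustrative check against the exact solution of y' = y (which is e**x); the tolerance is generous for a fourth-order method at this step size:

# y' = y with y(0) = 1; RK4's global error at h = 0.01 is far below 1e-6.
import numpy as np

y = runge_kutta(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
assert abs(y[-1] - np.e) < 1e-6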
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
235
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a__ = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : """simple docstring""" UpperCAmelCase__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) UpperCAmelCase__ : str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase__ : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase__ : bool = field( default=__lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __lowercase ( self ) -> List[Any]: _a : List[Any] = self.task_name.lower() class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : List[str] = "train" UpperCAmelCase__ : List[str] = "dev" UpperCAmelCase__ : Union[str, Any] = "test" class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : GlueDataTrainingArguments UpperCAmelCase__ : str UpperCAmelCase__ : List[InputFeatures] def __init__( self , _a , _a , _a = None , _a = Split.train , _a = None , ) -> str: warnings.warn( '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , _a , ) _a : List[str] = args _a : List[str] = glue_processors[args.task_name]() _a : List[Any] = glue_output_modes[args.task_name] if isinstance(_a , _a ): try: _a : Any = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) # Load data features from cache or dataset file _a : Union[str, Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) _a : str = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) _a , _a : Optional[int] = label_list[2], label_list[1] _a : Dict = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_a : Optional[int] = cached_features_file + '''.lock''' with FileLock(_a ): if os.path.exists(_a ) and not args.overwrite_cache: _a : List[Any] = time.time() _a : str = torch.load(_a ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: _a : str = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: _a : Any = self.processor.get_test_examples(args.data_dir ) else: _a : List[str] = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: _a : Dict = examples[:limit_length] _a : Union[str, Any] = glue_convert_examples_to_features( _a , _a , max_length=args.max_seq_length , label_list=_a , output_mode=self.output_mode , ) _a : Optional[int] = time.time() torch.save(self.features , _a ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self ) -> Optional[int]: return len(self.features ) def __getitem__( self , _a ) -> InputFeatures: return self.features[i] def __lowercase ( self ) -> Tuple: return self.label_list
235
1
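A hedged usage sketch of the deprecated GLUE dataset class above, using the upstream names the scrambled identifiers correspond to; the data_dir path is an assumption and must already hold the task's .tsv files:

# Hypothetical checkpoint and data path; the dataset caches features
# next to the data and can be indexed like any torch Dataset.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")
print(len(dataset), dataset.get_labels())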
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a :Dict = logging.get_logger(__name__) a :List[str] = { "andreasmadsen/efficient_mlm_m0.40": ( "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json" ), } class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Any = """roberta-prelayernorm""" def __init__( self , _a=50_265 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-1_2 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) SCREAMING_SNAKE_CASE__ : Any = vocab_size SCREAMING_SNAKE_CASE__ : int = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps SCREAMING_SNAKE_CASE__ : int = position_embedding_type SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache SCREAMING_SNAKE_CASE__ : Dict = classifier_dropout class __a (UpperCamelCase_): '''simple docstring''' @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE__ : Any = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
56
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu a :List[Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: a :str = json.load(f) @require_torch class __a (unittest.TestCase): '''simple docstring''' def _a ( self , _a ) -> Optional[int]: """simple docstring""" return FSMTTokenizer.from_pretrained(_a ) def _a ( self , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(_a ).to(_a ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _a ( self , _a , _a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''facebook/wmt19-{pair}''' SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer(_a ) SCREAMING_SNAKE_CASE__ : Any = self.get_model(_a ) SCREAMING_SNAKE_CASE__ : Tuple = bleu_data[pair]["""src"""] SCREAMING_SNAKE_CASE__ : Any = bleu_data[pair]["""tgt"""] SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a , return_tensors="""pt""" , truncation=_a , padding="""longest""" ).to(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate( input_ids=batch.input_ids , num_beams=8 , ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) SCREAMING_SNAKE_CASE__ : Dict = calculate_bleu(_a , _a ) print(_a ) self.assertGreaterEqual(scores["""bleu"""] , _a )
56
1
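The FSMT test above delegates scoring to a local calculate_bleu helper; assuming that helper wraps sacrebleu's corpus_bleu (as the analogous HF example utilities do), the scoring step reduces to:

# Assumes sacrebleu is installed; hypotheses and references are toy data.
# References are passed as a list of reference streams, one stream here.
import sacrebleu

hypotheses = ["The cat sits on the mat."]
references = [["The cat sat on the mat."]]
score = sacrebleu.corpus_bleu(hypotheses, references).score
print(round(score, 2))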
"""simple docstring""" from math import isqrt def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[int]: lowercase__: str = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __UpperCAmelCase , __UpperCAmelCase ): lowercase__: int = False return [i for i in range(2 , __UpperCAmelCase ) if is_prime[i]] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0**8 ) -> int: lowercase__: List[Any] = calculate_prime_numbers(max_number // 2 ) lowercase__: Union[str, Any] = 0 lowercase__: str = 0 lowercase__: Optional[int] = len(__UpperCAmelCase ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f'''{solution() = }''')
177
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase__: str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} lowercase__: Optional[Any] = parent lowercase__: List[Any] = batch_size lowercase__: Tuple = num_channels lowercase__: Optional[Any] = min_resolution lowercase__: Dict = max_resolution lowercase__: Optional[int] = do_resize lowercase__: Any = size lowercase__: Optional[Any] = do_normalize lowercase__: Union[str, Any] = image_mean lowercase__: Tuple = image_std lowercase__: str = do_rescale lowercase__: Any = rescale_factor lowercase__: List[Any] = do_pad def _snake_case ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ): if not batched: lowercase__: Optional[Any] = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image ): lowercase__, lowercase__: Dict = image.size else: lowercase__, lowercase__: Optional[Any] = image.shape[1], image.shape[2] if w < h: lowercase__: List[str] = int(self.size['''shortest_edge'''] * h / w ) lowercase__: Union[str, Any] = self.size['''shortest_edge'''] elif w > h: lowercase__: int = self.size['''shortest_edge'''] lowercase__: int = int(self.size['''shortest_edge'''] * w / h ) else: lowercase__: Union[str, Any] = self.size['''shortest_edge'''] lowercase__: Union[str, Any] = self.size['''shortest_edge'''] else: lowercase__: Optional[int] = [] for image in image_inputs: lowercase__, lowercase__: int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__: Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] lowercase__: Dict = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = YolosImageProcessor if is_vision_available() else None def _snake_case ( self ): lowercase__: int = YolosImageProcessingTester(self ) @property def _snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self ): lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 
'''do_normalize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) ) def _snake_case ( self ): lowercase__: Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , _UpperCAmelCase ) lowercase__: Any = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , _UpperCAmelCase ) def _snake_case ( self ): pass def _snake_case ( self ): # Initialize image_processing lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input lowercase__: int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__, lowercase__: Any = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) lowercase__: int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processing lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input lowercase__: List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: str = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__: Dict = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processing lowercase__: Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input lowercase__: Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: int = 
self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__: List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processings lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) lowercase__: Optional[Any] = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_rescale=_UpperCAmelCase ) # create random PyTorch tensors lowercase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors lowercase__: List[str] = image_processing_a.pad(_UpperCAmelCase , return_tensors='''pt''' ) lowercase__: Tuple = image_processing_a(_UpperCAmelCase , return_tensors='''pt''' ) self.assertTrue( torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) ) @slow def _snake_case ( self ): # prepare image and target lowercase__: Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: lowercase__: Any = json.loads(f.read() ) lowercase__: Dict = {'''image_id''': 39769, '''annotations''': target} # encode them lowercase__: Dict = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' ) lowercase__: Any = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values lowercase__: Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase ) lowercase__: Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) ) # verify area lowercase__: Tuple = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) ) # verify boxes lowercase__: str = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) ) # verify image_id lowercase__: Optional[int] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) ) # verify is_crowd lowercase__: Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) ) # verify class_labels lowercase__: Dict = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) ) # verify orig_size lowercase__: List[Any] 
= torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) ) # verify size lowercase__: List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) ) @slow def _snake_case ( self ): # prepare image, target and masks_path lowercase__: str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: lowercase__: str = json.loads(f.read() ) lowercase__: List[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} lowercase__: Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowercase__: Union[str, Any] = YolosImageProcessor(format='''coco_panoptic''' ) lowercase__: Optional[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values lowercase__: Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase ) lowercase__: Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) ) # verify area lowercase__: str = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) ) # verify boxes lowercase__: List[str] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) ) # verify image_id lowercase__: int = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) ) # verify is_crowd lowercase__: int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) ) # verify class_labels lowercase__: Dict = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) ) # verify masks lowercase__: Union[str, Any] = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _UpperCAmelCase ) # verify orig_size lowercase__: List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) ) # verify size lowercase__: Union[str, Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
177
1
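A tiny illustrative check of the semiprime counter above: below 30 there are exactly ten products p * q of two primes with p <= q.

# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 are the composites with exactly
# two prime factors (counted with multiplicity) below 30.
assert solution(30) == 10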
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCAmelCase_ : Optional[Any] = ["""bert-base-uncased""", """bert-base-cased"""] UpperCAmelCase_ : Union[str, Any] = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class lowerCAmelCase__ ( tf.keras.Model ): '''simple docstring''' def __init__( self : Dict , lowercase_ : int): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : List[str] = tokenizer SCREAMING_SNAKE_CASE_ : Optional[int] = AutoConfig.from_pretrained(lowercase_) SCREAMING_SNAKE_CASE_ : Tuple = TFAutoModel.from_config(lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : List[str] = self.bert(**lowercase_) return out["pooler_output"] @require_tf @require_tensorflow_text class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE_ : Optional[int] = [ BertTokenizer.from_pretrained(lowercase_) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false SCREAMING_SNAKE_CASE_ : Dict = [TFBertTokenizer.from_pretrained(lowercase_) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(lowercase_ , use_fast_bert_tokenizer=lowercase_) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) SCREAMING_SNAKE_CASE_ : List[str] = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] SCREAMING_SNAKE_CASE_ : List[str] = list(zip(self.test_sentences , self.test_sentences[::-1])) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): SCREAMING_SNAKE_CASE_ : int = tokenizer(lowercase_ , return_tensors='''tf''' , padding='''longest''') SCREAMING_SNAKE_CASE_ : int = tf_tokenizer(lowercase_) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key])) @slow def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: SCREAMING_SNAKE_CASE_ : int = tf_tokenizer(self.paired_sentences) SCREAMING_SNAKE_CASE_ : int = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key])) @slow def _SCREAMING_SNAKE_CASE ( self : 
List[Any]): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.function(lowercase_) for test_inputs in (self.test_sentences, self.paired_sentences): SCREAMING_SNAKE_CASE_ : Any = tf.constant(lowercase_) SCREAMING_SNAKE_CASE_ : int = compiled_tokenizer(lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = tf_tokenizer(lowercase_) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: SCREAMING_SNAKE_CASE_ : List[str] = ModelToSave(tokenizer=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = tf.convert_to_tensor(self.test_sentences) SCREAMING_SNAKE_CASE_ : int = model(lowercase_) # Build model with some sample inputs with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE_ : Any = Path(lowercase_) / '''saved.model''' model.save(lowercase_) SCREAMING_SNAKE_CASE_ : Dict = tf.keras.models.load_model(lowercase_) SCREAMING_SNAKE_CASE_ : Any = loaded_model(lowercase_) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
318
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
318
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class _a ( unittest.TestCase):
    """simple docstring"""

    def UpperCAmelCase_ ( self: int ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def UpperCAmelCase_ ( self: str ):
        '''simple docstring'''
        UpperCamelCase__: Any = 1
        UpperCamelCase__: int = 3
        UpperCamelCase__: Dict = (32, 32)
        UpperCamelCase__: Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
        return image

    @property
    def UpperCAmelCase_ ( self: List[str] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        UpperCamelCase__: List[str] = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=7 ,
            out_channels=4 ,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") ,
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") ,
            cross_attention_dim=32 ,
            attention_head_dim=8 ,
            use_linear_projection=__lowerCamelCase ,
            only_cross_attention=(True, True, False) ,
            num_class_embeds=100 ,
        )
        return model

    @property
    def UpperCAmelCase_ ( self: Optional[Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        UpperCamelCase__: List[Any] = AutoencoderKL(
            block_out_channels=[32, 32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] ,
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] ,
            latent_channels=4 ,
        )
        return model

    @property
    def UpperCAmelCase_ ( self: Any ):
        '''simple docstring'''
        torch.manual_seed(0 )
        UpperCamelCase__: Tuple = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1e-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1000 ,
            hidden_act="gelu" ,
            projection_dim=512 ,
        )
        return CLIPTextModel(__lowerCamelCase )

    def UpperCAmelCase_ ( self: Any ):
        '''simple docstring'''
        UpperCamelCase__: Any = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__: Optional[Any] = self.dummy_cond_unet_upscale
        UpperCamelCase__: List[str] = DDPMScheduler()
        UpperCamelCase__: int = DDIMScheduler(prediction_type="v_prediction" )
        UpperCamelCase__: str = self.dummy_vae
        UpperCamelCase__: Dict = self.dummy_text_encoder
        UpperCamelCase__: Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        UpperCamelCase__: List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__: int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )

        # make sure here that pndm scheduler skips prk
        UpperCamelCase__: str = StableDiffusionUpscalePipeline(
            unet=__lowerCamelCase ,
            low_res_scheduler=__lowerCamelCase ,
            scheduler=__lowerCamelCase ,
            vae=__lowerCamelCase ,
            text_encoder=__lowerCamelCase ,
            tokenizer=__lowerCamelCase ,
            max_noise_level=350 ,
        )
        UpperCamelCase__: Optional[int] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )

        UpperCamelCase__: List[str] = 'A painting of a squirrel eating a burger'
        UpperCamelCase__: Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
        UpperCamelCase__: Tuple = sd_pipe(
            [prompt] ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type="np" ,
        )

        UpperCamelCase__: int = output.images

        UpperCamelCase__: Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
        UpperCamelCase__: List[Any] = sd_pipe(
            [prompt] ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type="np" ,
            return_dict=__lowerCamelCase ,
        )[0]

        UpperCamelCase__: Union[str, Any] = image[0, -3:, -3:, -1]
        UpperCamelCase__: Optional[int] = image_from_tuple[0, -3:, -3:, -1]

        UpperCamelCase__: Union[str, Any] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        UpperCamelCase__: Dict = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def UpperCAmelCase_ ( self: str ):
        '''simple docstring'''
        UpperCamelCase__: Any = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__: Optional[Any] = self.dummy_cond_unet_upscale
        UpperCamelCase__: Union[str, Any] = DDPMScheduler()
        UpperCamelCase__: Dict = DDIMScheduler(prediction_type="v_prediction" )
        UpperCamelCase__: Any = self.dummy_vae
        UpperCamelCase__: str = self.dummy_text_encoder
        UpperCamelCase__: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        UpperCamelCase__: Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__: str = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )

        # make sure here that pndm scheduler skips prk
        UpperCamelCase__: List[Any] = StableDiffusionUpscalePipeline(
            unet=__lowerCamelCase ,
            low_res_scheduler=__lowerCamelCase ,
            scheduler=__lowerCamelCase ,
            vae=__lowerCamelCase ,
            text_encoder=__lowerCamelCase ,
            tokenizer=__lowerCamelCase ,
            max_noise_level=350 ,
        )
        UpperCamelCase__: Optional[int] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )

        UpperCamelCase__: str = 'A painting of a squirrel eating a burger'

        UpperCamelCase__: Optional[Any] = sd_pipe(
            2 * [prompt] ,
            image=2 * [low_res_image] ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type="np" ,
        )
        UpperCamelCase__: Union[str, Any] = output.images
        assert image.shape[0] == 2

        UpperCamelCase__: Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
        UpperCamelCase__: Any = sd_pipe(
            [prompt] ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            num_images_per_prompt=2 ,
            guidance_scale=6.0 ,
            noise_level=20 ,
            num_inference_steps=2 ,
            output_type="np" ,
        )
        UpperCamelCase__: Any = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def UpperCAmelCase_ ( self: Optional[Any] ):
        '''simple docstring'''
        UpperCamelCase__: Any = self.dummy_cond_unet_upscale
        UpperCamelCase__: Union[str, Any] = DDPMScheduler()
        UpperCamelCase__: Tuple = DDIMScheduler(prediction_type="v_prediction" )
        UpperCamelCase__: List[str] = self.dummy_vae
        UpperCamelCase__: List[Any] = self.dummy_text_encoder
        UpperCamelCase__: Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        UpperCamelCase__: List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__: Union[str, Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )

        # put models in fp16, except vae as it overflows in fp16
        UpperCamelCase__: Tuple = unet.half()
        UpperCamelCase__: Optional[Any] = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        UpperCamelCase__: List[str] = StableDiffusionUpscalePipeline(
            unet=__lowerCamelCase ,
            low_res_scheduler=__lowerCamelCase ,
            scheduler=__lowerCamelCase ,
            vae=__lowerCamelCase ,
            text_encoder=__lowerCamelCase ,
            tokenizer=__lowerCamelCase ,
            max_noise_level=350 ,
        )
        UpperCamelCase__: Tuple = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )

        UpperCamelCase__: List[str] = 'A painting of a squirrel eating a burger'
        UpperCamelCase__: Optional[int] = torch.manual_seed(0 )
        UpperCamelCase__: List[str] = sd_pipe(
            [prompt] ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            num_inference_steps=2 ,
            output_type="np" ,
        ).images

        UpperCamelCase__: List[str] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class _a ( unittest.TestCase):
    """simple docstring"""

    def UpperCAmelCase_ ( self: Optional[Any] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self: Optional[int] ):
        '''simple docstring'''
        UpperCamelCase__: int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" )
        UpperCamelCase__: List[str] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" )

        UpperCamelCase__: List[str] = 'stabilityai/stable-diffusion-x4-upscaler'
        UpperCamelCase__: List[str] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        pipe.enable_attention_slicing()

        UpperCamelCase__: Optional[Any] = 'a cat sitting on a park bench'

        UpperCamelCase__: Optional[int] = torch.manual_seed(0 )
        UpperCamelCase__: Union[str, Any] = pipe(
            prompt=__lowerCamelCase ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            output_type="np" ,
        )
        UpperCamelCase__: Any = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-3

    def UpperCAmelCase_ ( self: Dict ):
        '''simple docstring'''
        UpperCamelCase__: List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" )
        UpperCamelCase__: str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" )

        UpperCamelCase__: Optional[int] = 'stabilityai/stable-diffusion-x4-upscaler'
        UpperCamelCase__: Tuple = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCamelCase ,
            torch_dtype=torch.floataa ,
        )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        pipe.enable_attention_slicing()

        UpperCamelCase__: List[str] = 'a cat sitting on a park bench'

        UpperCamelCase__: List[str] = torch.manual_seed(0 )
        UpperCamelCase__: List[Any] = pipe(
            prompt=__lowerCamelCase ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            output_type="np" ,
        )
        UpperCamelCase__: Dict = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def UpperCAmelCase_ ( self: str ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        UpperCamelCase__: str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" )

        UpperCamelCase__: Union[str, Any] = 'stabilityai/stable-diffusion-x4-upscaler'
        UpperCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCamelCase ,
            torch_dtype=torch.floataa ,
        )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        UpperCamelCase__: List[str] = 'a cat sitting on a park bench'

        UpperCamelCase__: List[Any] = torch.manual_seed(0 )
        UpperCamelCase__: List[str] = pipe(
            prompt=__lowerCamelCase ,
            image=__lowerCamelCase ,
            generator=__lowerCamelCase ,
            num_inference_steps=5 ,
            output_type="np" ,
        )

        UpperCamelCase__: Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
149
"""simple docstring""" import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase: Tuple = logging.get_logger(__name__) _UpperCamelCase: Optional[int] = '▁' _UpperCamelCase: List[Any] = {'vocab_file': 'prophetnet.tokenizer'} _UpperCamelCase: str = { 'vocab_file': { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer' ), } } _UpperCamelCase: Any = { 'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False}, } _UpperCamelCase: Dict = { 'microsoft/xprophetnet-large-wiki100-cased': 5_1_2, } def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase : Union[str, Any] = collections.OrderedDict() with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as reader: lowercase : Tuple = reader.readlines() for index, token in enumerate(_UpperCAmelCase ): lowercase : Dict = token.rstrip('\n' ) lowercase : Optional[Any] = index return vocab class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['input_ids', 'attention_mask'] def __init__( self : str, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Any="[SEP]", lowerCAmelCase : int="[SEP]", lowerCAmelCase : Tuple="[SEP]", lowerCAmelCase : Union[str, Any]="[UNK]", lowerCAmelCase : Optional[Any]="[PAD]", lowerCAmelCase : int="[CLS]", lowerCAmelCase : Union[str, Any]="[MASK]", lowerCAmelCase : Optional[Dict[str, Any]] = None, **lowerCAmelCase : str, ) -> None: lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, sep_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, cls_token=lowerCAmelCase, mask_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, ) try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCAmelCase ) ) lowercase : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowercase : Tuple = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): lowercase : List[str] = f'''[unused{i}]''' lowercase : Any = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowercase : Any = 12 lowercase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(lowerCAmelCase ) def __getstate__( self : int ) -> str: lowercase : int = self.__dict__.copy() lowercase : Union[str, Any] = None return state def __setstate__( self : Tuple, lowerCAmelCase : Optional[Any] ) -> int: lowercase : str = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): lowercase : Any = {} lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase ( self : str, lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None, lowerCAmelCase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase ) if token_ids_a is None: return ([0] * len(lowerCAmelCase )) + [1] return ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1] def lowercase ( self : int, lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: lowercase : List[str] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowercase ( self : List[str] ) -> int: return len(self.sp_model ) + self.fairseq_offset def lowercase ( self : Union[str, Any] ) -> str: lowercase : str = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase ( self : Dict, lowerCAmelCase : str ) -> str: return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase ) def lowercase ( self : List[str], lowerCAmelCase : Any ) -> List[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : Dict = self.sp_model.PieceToId(lowerCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase ( self : Dict, lowerCAmelCase : Optional[Any] ) -> Dict: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase ( self : Tuple, lowerCAmelCase : List[str] ) -> Optional[Any]: lowercase : Tuple = ''.join(lowerCAmelCase ).replace(lowerCAmelCase, ' ' ).strip() return out_string def lowercase ( self : List[Any], lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : List[Any] = os.path.join( lowerCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and 
os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase, 'wb' ) as fi: lowercase : List[str] = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,) def lowercase ( self : List[Any], lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowercase : int = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
255
0
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Tuple = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } __A : Tuple = { "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } __A : Any = { "ctrl": 256, } __A : Optional[Any] = { "Pregnancy": 168629, "Christianity": 7675, "Explain": 106423, "Fitness": 63440, "Saving": 63163, "Ask": 27171, "Ass": 95985, "Joke": 163509, "Questions": 45622, "Thoughts": 49605, "Retail": 52342, "Feminism": 164338, "Writing": 11992, "Atheism": 192263, "Netflix": 48616, "Computing": 39639, "Opinion": 43213, "Alone": 44967, "Funny": 58917, "Gaming": 40358, "Human": 4088, "India": 1331, "Joker": 77138, "Diet": 36206, "Legal": 11859, "Norman": 4939, "Tip": 72689, "Weight": 52343, "Movies": 46273, "Running": 23425, "Science": 2090, "Horror": 37793, "Confession": 60572, "Finance": 12250, "Politics": 16360, "Scary": 191985, "Support": 12654, "Technologies": 32516, "Teenage": 66160, "Event": 32769, "Learned": 67460, "Notion": 182770, "Wikipedia": 37583, "Books": 6665, "Extract": 76050, "Confessions": 102701, "Conspiracy": 75932, "Links": 63674, "Narcissus": 150425, "Relationship": 54766, "Relationships": 134796, "Reviews": 41671, "News": 4256, "Translation": 26820, "multilingual": 128406, } def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCAmelCase = char _UpperCAmelCase = set(_SCREAMING_SNAKE_CASE ) return pairs class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = CONTROL_CODES def __init__( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple="<unk>" , **__UpperCamelCase : str )->Tuple: super().__init__(unk_token=__UpperCamelCase , **__UpperCamelCase ) with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle: _UpperCAmelCase = json.load(__UpperCamelCase ) _UpperCAmelCase = {v: k for k, v in self.encoder.items()} with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle: _UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1] _UpperCAmelCase = [tuple(merge.split() ) for merge in merges] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = {} @property def lowercase__ ( self : Union[str, Any] )->int: return len(self.encoder ) def lowercase__ ( self : Optional[int] )->Optional[Any]: return dict(self.encoder , **self.added_tokens_encoder ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, Any] )->Optional[int]: if token in self.cache: return self.cache[token] _UpperCAmelCase = tuple(__UpperCamelCase ) _UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) _UpperCAmelCase = get_pairs(__UpperCamelCase ) if not pairs: return token while True: _UpperCAmelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _UpperCAmelCase , _UpperCAmelCase = bigram _UpperCAmelCase = [] _UpperCAmelCase = 0 while i < 
len(__UpperCamelCase ): try: _UpperCAmelCase = word.index(__UpperCamelCase , __UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _UpperCAmelCase = j if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _UpperCAmelCase = tuple(__UpperCamelCase ) _UpperCAmelCase = new_word if len(__UpperCamelCase ) == 1: break else: _UpperCAmelCase = get_pairs(__UpperCamelCase ) _UpperCAmelCase = '''@@ '''.join(__UpperCamelCase ) _UpperCAmelCase = word[:-4] _UpperCAmelCase = word return word def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] )->int: _UpperCAmelCase = [] _UpperCAmelCase = re.findall(r'''\S+\n?''' , __UpperCamelCase ) for token in words: split_tokens.extend(list(self.bpe(__UpperCamelCase ).split(''' ''' ) ) ) return split_tokens def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Any )->Optional[int]: return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) ) def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] )->str: return self.decoder.get(__UpperCamelCase , self.unk_token ) def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Tuple )->Optional[int]: _UpperCAmelCase = ''' '''.join(__UpperCamelCase ).replace('''@@ ''' , '''''' ).strip() return out_string def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]: if not os.path.isdir(__UpperCamelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' ) _UpperCAmelCase = 0 with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ''' Please check that the tokenizer is not corrupted!''' ) _UpperCAmelCase = token_index writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
326
"""simple docstring""" import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' with open(_SCREAMING_SNAKE_CASE ) as metadata_file: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module'''] # Load the entity vocab file _UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE ) # add an entry for [MASK2] _UpperCAmelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 _UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f: _UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''MLukeTokenizer''' with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) # Initialize the embeddings of the special tokens _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0] _UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0] _UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: _UpperCAmelCase = state_dict[bias_name] _UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 ) _UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 ) _UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.' 
_UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] _UpperCAmelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' _UpperCAmelCase = state_dict['''entity_predictions.bias'''] _UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) _UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) _UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) _UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): _UpperCAmelCase = state_dict[key] else: _UpperCAmelCase = state_dict[key] _UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}: raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' ) if set(_SCREAMING_SNAKE_CASE ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'Unexpected missing_keys: {missing_keys}' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' ) _UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' _UpperCAmelCase = (0, 9) _UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 33, 768) ) _UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base _UpperCAmelCase = torch.Size((1, 1, 768) ) _UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction _UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = '''Tokyo is the capital of <mask>.''' _UpperCAmelCase = (24, 30) _UpperCAmelCase = 
tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = encoding['''input_ids'''][0].tolist() _UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) _UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.entity_logits[0][0].argmax().item() _UpperCAmelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]'''] _UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )] _UpperCAmelCase = {} for entry in data: _UpperCAmelCase = entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: _UpperCAmelCase = entity_id break _UpperCAmelCase = f'{language}:{entity_name}' _UpperCAmelCase = entity_id return new_mapping if __name__ == "__main__": __A : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A : List[str] = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
326
1
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class _SCREAMING_SNAKE_CASE : @staticmethod def __lowerCAmelCase ( *__A , **__A ) -> str: pass def _snake_case ( lowercase__ : Image ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def _snake_case ( lowercase__ : Image ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = np.array(lowercase__ ) lowerCAmelCase_ :str = npimg.shape return {"hash": hashimage(lowercase__ ), "shape": shape} @is_pipeline_test @require_vision @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): UpperCAmelCase_ :str = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) UpperCAmelCase_ :int = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def __lowerCAmelCase ( self , __A , __A , __A ) -> Dict: lowerCAmelCase_ :int = MaskGenerationPipeline(model=__A , image_processor=__A ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def __lowerCAmelCase ( self , __A , __A ) -> List[Any]: pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Dict = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) lowerCAmelCase_ :Optional[Any] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing lowerCAmelCase_ :List[str] = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__A , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_9_6_7}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_9_3}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_9_0_9}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_8_7_9}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_8_3_4}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_7_1_6}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_6_1_2}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_5_9_9}, 
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_5_5_2}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_5_3_2}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_5_1_6}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_4_9_9}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_4_8_3}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_4_6_4}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_4_3}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_4_3}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_4_0_8}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_3_3_5}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_3_2_6}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_2_6_2}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_9_9_9}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_9_8_6}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_9_8_4}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_8_7_3}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Tuple = """facebook/sam-vit-huge""" lowerCAmelCase_ :List[str] = pipeline("""mask-generation""" , model=__A ) lowerCAmelCase_ :Tuple = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing lowerCAmelCase_ :Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__A , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1_0}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3}, ] , )
84
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


__snake_case = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase (_a ):
    _lowercase = field(default=_a , metadata={"""help""": """Whether to use SortishSampler or not."""} )
    _lowercase = field(
        default=_a , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    _lowercase = field(
        default=_a ,
        metadata={
            """help""": (
                """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `max_length` value of the model configuration."""
            )
        } ,
    )
    _lowercase = field(
        default=_a ,
        metadata={
            """help""": (
                """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `num_beams` value of the model configuration."""
            )
        } ,
    )
    _lowercase = field(
        default=_a ,
        metadata={
            """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
        } ,
    )

    def snake_case_ ( self: List[Any] ):
        '''simple docstring'''
        __UpperCamelCase = super().to_dict()
        for k, v in d.items():
            if isinstance(A_,A_ ):
                __UpperCamelCase = v.to_dict()
        return d
310
0
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = 8.3_1_4_4_5_9_8


def UpperCAmelCase ( a_ , a_ ) -> float:
    """simple docstring"""
    if temperature < 0:
        raise Exception("""Temperature cannot be less than 0 K""" )
    if molar_mass <= 0:
        raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    UpperCamelCase__ : Optional[int] = 300
    UpperCamelCase__ : int = 28
    UpperCamelCase__ : List[Any] = rms_speed_of_molecule(temperature, molar_mass)
    print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
164
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class _lowerCAmelCase :
    """simple docstring"""

    def __init__( self , _lowerCamelCase ) -> Optional[Any]:
        A_ : Any = data
        A_ : Node | None = None


class _lowerCAmelCase :
    """simple docstring"""

    def __init__( self ) -> List[str]:
        A_ : Tuple = None
        A_ : str = None

    def __iter__( self ) -> Iterator[Any]:
        A_ : Dict = self.head
        while self.head:
            yield node.data
            A_ : Optional[Any] = node.next
            if node == self.head:
                break

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        return "->".join(str(_lowerCamelCase ) for item in iter(self ) )

    def UpperCAmelCase_ ( self , _lowerCamelCase ) -> None:
        self.insert_nth(len(self ) , _lowerCamelCase )

    def UpperCAmelCase_ ( self , _lowerCamelCase ) -> None:
        self.insert_nth(0 , _lowerCamelCase )

    def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> None:
        if index < 0 or index > len(self ):
            raise IndexError("""list index out of range.""" )
        A_ : Optional[int] = Node(_lowerCamelCase )
        if self.head is None:
            A_ : str = new_node  # first node points itself
            A_ : Union[str, Any] = new_node
        elif index == 0:  # insert at head
            A_ : List[Any] = self.head
            A_ : List[Any] = new_node
        else:
            A_ : List[str] = self.head
            for _ in range(index - 1 ):
                A_ : Optional[int] = temp.next
            A_ : Tuple = temp.next
            A_ : str = new_node
            if index == len(self ) - 1:  # insert at tail
                A_ : Optional[int] = new_node

    def UpperCAmelCase_ ( self ) -> List[Any]:
        return self.delete_nth(0 )

    def UpperCAmelCase_ ( self ) -> Any:
        return self.delete_nth(len(self ) - 1 )

    def UpperCAmelCase_ ( self , _lowerCamelCase = 0 ) -> Any:
        if not 0 <= index < len(self ):
            raise IndexError("""list index out of range.""" )
        A_ : int = self.head
        if self.head == self.tail:  # just one node
            A_ : int = None
        elif index == 0:  # delete head node
            A_ : Union[str, Any] = self.tail.next.next
            A_ : Tuple = self.head.next
        else:
            A_ : Optional[int] = self.head
            for _ in range(index - 1 ):
                A_ : Tuple = temp.next
            A_ : Any = temp.next
            A_ : Tuple = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                A_ : List[str] = temp
        return delete_node.data

    def UpperCAmelCase_ ( self ) -> bool:
        return len(self ) == 0


def UpperCAmelCase ( ) -> None:
    """simple docstring"""
    A_ : Any = CircularLinkedList()
    assert len(a_ ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(a_ ) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(a_ ) == i
        circular_linked_list.insert_nth(a_ , i + 1 )
    assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 6 ) )

    circular_linked_list.insert_tail(6 )
    assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(a_ ) == "->".join(str(a_ ) for i in range(0 , 7 ) )

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3

    circular_linked_list.insert_nth(2 , 3 )
    assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 6 ) )

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
164
1
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase : Tuple = DiTPipeline
    lowerCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    lowerCamelCase : int = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    lowerCamelCase : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    lowerCamelCase : str = False

    def lowercase_ ( self ) -> Any:
        torch.manual_seed(0 )
        __lowerCamelCase : Optional[int] = TransformeraDModel(
            sample_size=16 ,
            num_layers=2 ,
            patch_size=4 ,
            attention_head_dim=8 ,
            num_attention_heads=2 ,
            in_channels=4 ,
            out_channels=8 ,
            attention_bias=SCREAMING_SNAKE_CASE_ ,
            activation_fn='gelu-approximate' ,
            num_embeds_ada_norm=10_00 ,
            norm_type='ada_norm_zero' ,
            norm_elementwise_affine=SCREAMING_SNAKE_CASE_ ,
        )
        __lowerCamelCase : List[str] = AutoencoderKL()
        __lowerCamelCase : Any = DDIMScheduler()
        __lowerCamelCase : Union[str, Any] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> List[str]:
        if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
            __lowerCamelCase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
        else:
            __lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Tuple = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def lowercase_ ( self ) -> Tuple:
        __lowerCamelCase : List[str] = 'cpu'

        __lowerCamelCase : List[Any] = self.get_dummy_components()
        __lowerCamelCase : str = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )

        __lowerCamelCase : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = pipe(**SCREAMING_SNAKE_CASE_ ).images
        __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape , (1, 16, 16, 3) )
        __lowerCamelCase : Optional[int] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
        __lowerCamelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(SCREAMING_SNAKE_CASE_ , 1E-3 )

    def lowercase_ ( self ) -> Union[str, Any]:
        self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,
        reason='XFormers attention is only available with CUDA and `xformers` installed' ,
    )
    def lowercase_ ( self ) -> Tuple:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )


@require_torch_gpu
@slow
class UpperCAmelCase_ (unittest.TestCase ):
    """simple docstring"""

    def lowercase_ ( self ) -> Dict:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self ) -> List[Any]:
        __lowerCamelCase : str = torch.manual_seed(0 )

        __lowerCamelCase : Dict = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )

        __lowerCamelCase : Dict = ['vase', 'umbrella', 'white shark', 'white wolf']
        __lowerCamelCase : int = pipe.get_label_ids(SCREAMING_SNAKE_CASE_ )

        __lowerCamelCase : Optional[Any] = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=40 , output_type='np' ).images

        for word, image in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            __lowerCamelCase : List[str] = load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def lowercase_ ( self ) -> List[Any]:
        __lowerCamelCase : str = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        __lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )

        __lowerCamelCase : Any = ['vase', 'umbrella']
        __lowerCamelCase : Tuple = pipe.get_label_ids(SCREAMING_SNAKE_CASE_ )

        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : Optional[int] = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , output_type='np' ).images

        for word, image in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            __lowerCamelCase : Optional[int] = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f'/dit/{word}_512.npy' )
            assert np.abs((expected_image - image).max() ) < 1E-1
185
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


A__ : List[Any] = {
    """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : int = [
        """MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MegatronBertForCausalLM""",
        """MegatronBertForMaskedLM""",
        """MegatronBertForMultipleChoice""",
        """MegatronBertForNextSentencePrediction""",
        """MegatronBertForPreTraining""",
        """MegatronBertForQuestionAnswering""",
        """MegatronBertForSequenceClassification""",
        """MegatronBertForTokenClassification""",
        """MegatronBertModel""",
        """MegatronBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    A__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
185
1
"""simple docstring""" __lowercase = 9.8_06_65 def lowercase ( A_ , A_ , A_ = g )-> float: '''simple docstring''' if fluid_density <= 0: raise ValueError("Impossible fluid density" ) if volume < 0: raise ValueError("Impossible Object volume" ) if gravity <= 0: raise ValueError("Impossible Gravity" ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
226
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """char""" UpperCAmelCase : Optional[Any] = """bpe""" UpperCAmelCase : Optional[Any] = """wp""" __lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _A ( _a ): """simple docstring""" UpperCAmelCase : Optional[Any] = ["""image_processor""", """char_tokenizer"""] UpperCAmelCase : Optional[Any] = """ViTImageProcessor""" UpperCAmelCase : List[Any] = """MgpstrTokenizer""" def __init__( self : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : str): a : Tuple = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) a : List[str] = kwargs.pop("feature_extractor") a : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") a : Union[str, Any] = tokenizer a : int = AutoTokenizer.from_pretrained("gpt2") a : str = AutoTokenizer.from_pretrained("bert-base-uncased") super().__init__(__UpperCAmelCase , __UpperCAmelCase) def __call__( self : str , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : int): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process.") if images is not None: a : List[str] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) if text is not None: a : Optional[Any] = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) if text is None: return inputs elif images is None: return encodings else: a : Any = encodings["input_ids"] return inputs def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str]): a , a , a : Tuple = sequences a : Optional[int] = char_preds.size(0) a , a : Dict = self._decode_helper(__UpperCAmelCase , "char") a , a : Dict = self._decode_helper(__UpperCAmelCase , "bpe") a , a : Union[str, Any] = self._decode_helper(__UpperCAmelCase , "wp") a : Any = [] a : Union[str, Any] = [] for i in range(__UpperCAmelCase): a : Any = [char_scores[i], bpe_scores[i], wp_scores[i]] a : Optional[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] a : List[str] = scores.index(max(__UpperCAmelCase)) final_strs.append(strs[max_score_index]) final_scores.append(scores[max_score_index]) a : Dict = {} a : List[str] = final_strs a : str = final_scores a : int = char_strs a : int = bpe_strs a : Tuple = wp_strs return out def __snake_case ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]): if format == DecodeType.CHARACTER: a : int = self.char_decode a : int = 1 a : Dict = "[s]" elif format == DecodeType.BPE: a : List[str] = self.bpe_decode a : List[str] = 2 a : int = "#" elif format == DecodeType.WORDPIECE: a : Union[str, Any] = self.wp_decode a : List[str] = 102 a : int = "[SEP]" else: raise ValueError(f'''Format {format} is not supported.''') a , a : str = [], [] a : Optional[int] = pred_logits.size(0) a : List[str] = 
pred_logits.size(1) a , a : Tuple = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase) a : List[str] = preds_index.view(-1 , __UpperCAmelCase)[:, 1:] a : Any = decoder(__UpperCAmelCase) a , a : Union[str, Any] = torch.nn.functional.softmax(__UpperCAmelCase , dim=2).max(dim=2) a : Union[str, Any] = preds_max_prob[:, 1:] for index in range(__UpperCAmelCase): a : str = preds_str[index].find(__UpperCAmelCase) a : Optional[Any] = preds_str[index][:pred_eos] a : Optional[int] = preds_index[index].cpu().tolist() a : Optional[int] = pred_index.index(__UpperCAmelCase) if eos_token in pred_index else -1 a : List[str] = preds_max_prob[index][: pred_eos_index + 1] a : int = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__UpperCAmelCase) conf_scores.append(__UpperCAmelCase) return dec_strs, conf_scores def __snake_case ( self : Optional[int] , __UpperCAmelCase : Any): a : Dict = [seq.replace(" " , "") for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase)] return decode_strs def __snake_case ( self : Optional[int] , __UpperCAmelCase : List[str]): return self.bpe_tokenizer.batch_decode(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Any = [seq.replace(" " , "") for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase)] return decode_strs
226
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowercase = {
    '''configuration_clap''': [
        '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ClapAudioConfig''',
        '''ClapConfig''',
        '''ClapTextConfig''',
    ],
    '''processing_clap''': ['''ClapProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ClapModel''',
        '''ClapPreTrainedModel''',
        '''ClapTextModel''',
        '''ClapTextModelWithProjection''',
        '''ClapAudioModel''',
        '''ClapAudioModelWithProjection''',
    ]
    __lowercase = ['''ClapFeatureExtractor''']


if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
43
import argparse
import json

from tqdm import tqdm


def lowerCamelCase ( ):
    '''simple docstring'''
    __UpperCamelCase :Tuple = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=SCREAMING_SNAKE_CASE , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=SCREAMING_SNAKE_CASE , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=SCREAMING_SNAKE_CASE , help='''where to store parsed gold_data_path file''' , )
    __UpperCamelCase :str = parser.parse_args()

    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        __UpperCamelCase :List[str] = json.load(SCREAMING_SNAKE_CASE )
        for dpr_record in tqdm(SCREAMING_SNAKE_CASE ):
            __UpperCamelCase :List[str] = dpr_record['''question''']
            __UpperCamelCase :Tuple = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(SCREAMING_SNAKE_CASE ) + '''\n''' )


if __name__ == "__main__":
    main()
43
1
'''simple docstring'''

import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
    # Load configuration defined in the metadata file
    with open(_lowercase ) as metadata_file:
        UpperCAmelCase : Any = json.load(_lowercase )
    UpperCAmelCase : Any = LukeConfig(use_entity_aware_attention=_lowercase , **metadata["""model_config"""] )

    # Load in the weights from the checkpoint_path
    UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" )["""module"""]

    # Load the entity vocab file
    UpperCAmelCase : Any = load_original_entity_vocab(_lowercase )
    # add an entry for [MASK2]
    UpperCAmelCase : Any = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1

    UpperCAmelCase : Optional[int] = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )

    # Add special tokens to the token vocabulary for downstream tasks
    UpperCAmelCase : Dict = AddedToken("""<ent>""" , lstrip=_lowercase , rstrip=_lowercase )
    UpperCAmelCase : int = AddedToken("""<ent2>""" , lstrip=_lowercase , rstrip=_lowercase )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2

    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(_lowercase )

    with open(os.path.join(_lowercase , """tokenizer_config.json""" ) , """r""" ) as f:
        UpperCAmelCase : Optional[Any] = json.load(_lowercase )
    UpperCAmelCase : int = """MLukeTokenizer"""
    with open(os.path.join(_lowercase , """tokenizer_config.json""" ) , """w""" ) as f:
        json.dump(_lowercase , _lowercase )

    with open(os.path.join(_lowercase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(_lowercase , _lowercase )

    UpperCAmelCase : Optional[Any] = MLukeTokenizer.from_pretrained(_lowercase )

    # Initialize the embeddings of the special tokens
    UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
    UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(["""#"""] )[0]

    UpperCAmelCase : List[Any] = state_dict["""embeddings.word_embeddings.weight"""]
    UpperCAmelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
    UpperCAmelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
    UpperCAmelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        UpperCAmelCase : Any = state_dict[bias_name]
        UpperCAmelCase : Dict = decoder_bias[ent_init_index].unsqueeze(0 )
        UpperCAmelCase : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
        UpperCAmelCase : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            UpperCAmelCase : int = F'''encoder.layer.{layer_index}.attention.self.'''
            UpperCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name]
            UpperCAmelCase : Dict = state_dict[prefix + matrix_name]
            UpperCAmelCase : List[str] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    UpperCAmelCase : int = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    UpperCAmelCase : str = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    UpperCAmelCase : List[str] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    UpperCAmelCase : Tuple = state_dict["""entity_predictions.bias"""]
    UpperCAmelCase : List[str] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    UpperCAmelCase : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )

    UpperCAmelCase : Any = LukeForMaskedLM(config=_lowercase ).eval()

    state_dict.pop("""entity_predictions.decoder.weight""" )
    state_dict.pop("""lm_head.decoder.weight""" )
    state_dict.pop("""lm_head.decoder.bias""" )
    UpperCAmelCase : Any = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
            UpperCAmelCase : int = state_dict[key]
        else:
            UpperCAmelCase : List[str] = state_dict[key]

    UpperCAmelCase : int = model.load_state_dict(_lowercase , strict=_lowercase )

    if set(_lowercase ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(_lowercase ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    UpperCAmelCase : List[Any] = MLukeTokenizer.from_pretrained(_lowercase , task="""entity_classification""" )

    UpperCAmelCase : Dict = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
    UpperCAmelCase : Dict = (0, 9)
    UpperCAmelCase : Union[str, Any] = tokenizer(_lowercase , entity_spans=[span] , return_tensors="""pt""" )

    UpperCAmelCase : Dict = model(**_lowercase )

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        UpperCAmelCase : str = torch.Size((1, 3_3, 7_6_8) )
        UpperCAmelCase : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        UpperCAmelCase : Tuple = torch.Size((1, 1, 7_6_8) )
        UpperCAmelCase : int = torch.tensor([[-0.1482, 0.0609, 0.0322]] )

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowercase , atol=1e-4 ):
        raise ValueError

    # Verify masked word/entity prediction
    UpperCAmelCase : Dict = MLukeTokenizer.from_pretrained(_lowercase )
    UpperCAmelCase : Union[str, Any] = """Tokyo is the capital of <mask>."""
    UpperCAmelCase : List[str] = (2_4, 3_0)
    UpperCAmelCase : Tuple = tokenizer(_lowercase , entity_spans=[span] , return_tensors="""pt""" )

    UpperCAmelCase : str = model(**_lowercase )

    UpperCAmelCase : Union[str, Any] = encoding["""input_ids"""][0].tolist()
    UpperCAmelCase : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
    UpperCAmelCase : Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(_lowercase )

    UpperCAmelCase : int = outputs.entity_logits[0][0].argmax().item()
    UpperCAmelCase : Dict = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(_lowercase ) )
    model.save_pretrained(_lowercase )


def __lowerCamelCase ( _lowercase ) -> Optional[int]:
    UpperCAmelCase : Optional[Any] = ["""[MASK]""", """[PAD]""", """[UNK]"""]

    UpperCAmelCase : str = [json.loads(_lowercase ) for line in open(_lowercase )]

    UpperCAmelCase : str = {}
    for entry in data:
        UpperCAmelCase : Dict = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                UpperCAmelCase : Optional[int] = entity_id
                break
            UpperCAmelCase : Optional[int] = F'''{language}:{entity_name}'''
            UpperCAmelCase : Optional[int] = entity_id
    return new_mapping


if __name__ == "__main__":
    a : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
    parser.add_argument(
        """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
    )
    parser.add_argument(
        """--entity_vocab_path""",
        default=None,
        type=str,
        help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
    )
    parser.add_argument(
        """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
    )
    a : Optional[Any] = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
371
'''simple docstring'''

from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


a : List[Any] = logging.get_logger(__name__)

a : List[str] = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

a : List[Any] = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

a : List[Any] = {
    """facebook/blenderbot_small-90M""": 5_1_2,
}


class UpperCamelCase_ ( __magic_name__ ):
    lowercase = VOCAB_FILES_NAMES
    lowercase = PRETRAINED_VOCAB_FILES_MAP
    lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase = BlenderbotSmallTokenizer

    def __init__( self , A=None , A=None , A="<|endoftext|>" , A="<|endoftext|>" , A="<|endoftext|>" , A=False , A=True , **A , ) -> Union[str, Any]:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=A , merges=A , add_prefix_space=A , trim_offsets=A , ) ,
            bos_token=A ,
            eos_token=A ,
            unk_token=A ,
            **A ,
        )
        UpperCAmelCase : Optional[Any] = add_prefix_space

    def _lowercase( self , A , A=None ) -> Optional[Any]:
        UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _lowercase( self , A , A = None ) -> List[int]:
        UpperCAmelCase : Any = [self.sep_token_id]
        UpperCAmelCase : Tuple = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
338
0
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(_UpperCAmelCase , n - 1 , _UpperCAmelCase ) * a) % mod

    else:
        __a = binary_exponentiation(_UpperCAmelCase , n / 2 , _UpperCAmelCase )
        return (b * b) % mod


# a prime number
__snake_case :Optional[Any] = 701

__snake_case :str = 10_0000_0000
__snake_case :Tuple = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
49
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

__snake_case :List[Any] = logging.get_logger(__name__)


@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
    def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
        '''simple docstring'''
        super().__init__(**__SCREAMING_SNAKE_CASE)
        requires_backends(self , '''vision''')
        requires_backends(self , '''torch''')

        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')

        self.check_model_type(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
        '''simple docstring'''
        __a = {}
        __a = {}
        __a = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            __a = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            __a = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            __a = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            __a = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            __a = kwargs['''crop_n_points_downscale_factor''']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            __a = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            __a = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            __a = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            __a = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            __a = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            __a = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            __a = kwargs['''output_bboxes_mask''']

        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
        '''simple docstring'''
        return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
        '''simple docstring'''
        __a = load_image(__SCREAMING_SNAKE_CASE)
        __a = self.image_processor.size['''longest_edge''']
        __a , __a , __a , __a = self.image_processor.generate_crop_boxes(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')

        with self.device_placement():
            if self.framework == "pt":
                __a = self.get_inference_context()
                with inference_context():
                    __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
                    __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
                    __a = image_embeddings

        __a = grid_points.shape[1]
        __a = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')

        for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            __a = grid_points[:, i : i + points_per_batch, :, :]
            __a = input_labels[:, i : i + points_per_batch]
            __a = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
        '''simple docstring'''
        __a = model_inputs.pop('''input_boxes''')
        __a = model_inputs.pop('''is_last''')
        __a = model_inputs.pop('''original_sizes''').tolist()
        __a = model_inputs.pop('''reshaped_input_sizes''').tolist()

        __a = self.model(**__SCREAMING_SNAKE_CASE)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        __a = model_outputs['''pred_masks''']
        __a = self.image_processor.post_process_masks(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
        __a = model_outputs['''iou_scores''']
        __a , __a , __a = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )

        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
        '''simple docstring'''
        __a = []
        __a = []
        __a = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))

        __a = torch.cat(__SCREAMING_SNAKE_CASE)
        __a = torch.cat(__SCREAMING_SNAKE_CASE)
        __a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)

        __a = defaultdict(__SCREAMING_SNAKE_CASE)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(__SCREAMING_SNAKE_CASE)

        __a = {}
        if output_rle_mask:
            __a = rle_mask
        if output_bboxes_mask:
            __a = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
49
1
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


__a = logging.get_logger(__name__)


class A__ ( UpperCamelCase ):
    """simple docstring"""

    def __init__( self : str , *lowerCAmelCase__ : Any , **lowerCAmelCase__ : Any ) -> None:
        """simple docstring"""
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." , lowerCAmelCase__ , )
        super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
17
'''simple docstring'''

import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class A__ ( UpperCamelCase , unittest.TestCase ):
    """simple docstring"""

    UpperCamelCase_ : str = BarthezTokenizer
    UpperCamelCase_ : List[Any] = BarthezTokenizerFast
    UpperCamelCase_ : Optional[int] = True
    UpperCamelCase_ : Optional[int] = True

    def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        super().setUp()

        _UpperCAmelCase : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ )
        _UpperCAmelCase : List[str] = tokenizer

    def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        _UpperCAmelCase : Tuple = "<pad>"
        _UpperCAmelCase : Dict = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )

    def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        _UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(lowerCAmelCase__ ) , 1_0_1_1_2_2 )

    def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )

    @require_torch
    def _lowerCAmelCase ( self : Any ) -> int:
        """simple docstring"""
        _UpperCAmelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        _UpperCAmelCase : Optional[int] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]

        _UpperCAmelCase : int = self.tokenizer(
            lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )

        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        _UpperCAmelCase : str = batch.input_ids.tolist()[0]
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    def _lowerCAmelCase ( self : str ) -> Optional[Any]:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        _UpperCAmelCase : Optional[int] = self.get_tokenizer()
        _UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()

        _UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé."

        _UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase__ )
        _UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

        _UpperCAmelCase : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        _UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

        _UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
        _UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
        _UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    @slow
    def _lowerCAmelCase ( self : int ) -> int:
        """simple docstring"""
        _UpperCAmelCase : Optional[Any] = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        _UpperCAmelCase : Tuple = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__ ,
            model_name="moussaKam/mbarthez" ,
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" ,
            sequences=lowerCAmelCase__ ,
        )
17
1
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class lowercase_ :
    '''simple docstring'''

    __snake_case = XGLMConfig
    __snake_case = {}
    __snake_case = '''gelu'''

    def __init__( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any]=14 , __UpperCAmelCase : List[Any]=7 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : List[Any]=99 , __UpperCAmelCase : str=32 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : Union[str, Any]=0.02 , ) ->Any:
        """simple docstring"""
        a = parent
        a = batch_size
        a = seq_length
        a = is_training
        a = use_input_mask
        a = use_labels
        a = vocab_size
        a = d_model
        a = num_hidden_layers
        a = num_attention_heads
        a = ffn_dim
        a = activation_function
        a = activation_dropout
        a = attention_dropout
        a = max_position_embeddings
        a = initializer_range
        a = None
        a = 0
        a = 2
        a = 1

    def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
        """simple docstring"""
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )

    def __lowerCAmelCase ( self : Dict ) ->str:
        """simple docstring"""
        a = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )

        a = None
        if self.use_input_mask:
            a = random_attention_mask([self.batch_size, self.seq_length] )

        a = self.get_config()

        a = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
        """simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            num_layers=self.num_hidden_layers ,
            attention_heads=self.num_attention_heads ,
            ffn_dim=self.ffn_dim ,
            activation_function=self.activation_function ,
            activation_dropout=self.activation_dropout ,
            attention_dropout=self.attention_dropout ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            use_cache=__UpperCAmelCase ,
            bos_token_id=self.bos_token_id ,
            eos_token_id=self.eos_token_id ,
            pad_token_id=self.pad_token_id ,
            return_dict=__UpperCAmelCase ,
        )

    def __lowerCAmelCase ( self : Any ) ->str:
        """simple docstring"""
        a = self.prepare_config_and_inputs()

        (
            (a) ,
            (a) ,
            (a) ,
            (a) ,
        ) = config_and_inputs

        a = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }

        return config, inputs_dict


@require_tf
class lowercase_ ( lowercase , lowercase , unittest.TestCase ):
    '''simple docstring'''

    __snake_case = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    __snake_case = (TFXGLMForCausalLM,) if is_tf_available() else ()
    __snake_case = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False

    def __lowerCAmelCase ( self : List[Any] ) ->int:
        """simple docstring"""
        a = TFXGLMModelTester(self )
        a = ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )

    def __lowerCAmelCase ( self : Optional[Any] ) ->int:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @slow
    def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
        """simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a = TFXGLMModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
    def __lowerCAmelCase ( self : Any ) ->int:
        """simple docstring"""
        super().test_resize_token_embeddings()


@require_tf
class lowercase_ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Any=True ) ->Optional[int]:
        """simple docstring"""
        a = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        a = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        a = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        a = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )

    @slow
    def __lowerCAmelCase ( self : str ) ->str:
        """simple docstring"""
        a = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
        a = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )

        tf.random.set_seed(0 )
        a = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
        a = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0''' ):
            a = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
        a = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )

        a = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )

    @slow
    def __lowerCAmelCase ( self : Optional[int] ) ->Any:
        """simple docstring"""
        a = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        a = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )

        a = '''left'''

        # use different length sentences to test batching
        a = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]

        a = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
        a = inputs['''input_ids''']

        a = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )

        a = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
        a = model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )

        a = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
        a = model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )

        a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )

        a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
        a = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )

        a = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
0
def _a ( a :float , a :float ) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"""{price_plus_tax(100, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
0
1
'''simple docstring'''

import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class A__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1.0 , UpperCamelCase__ = None , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__()
        A_ = initial_learning_rate
        A_ = warmup_steps
        A_ = power
        A_ = decay_schedule_fn
        A_ = name

    def __call__( self , UpperCamelCase__ ) -> Tuple:
        '''simple docstring'''
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            A_ = tf.cast(UpperCamelCase__ , tf.floataa )
            A_ = tf.cast(self.warmup_steps , tf.floataa )
            A_ = global_step_float / warmup_steps_float
            A_ = self.initial_learning_rate * tf.math.pow(UpperCamelCase__ , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float ,
                lambda: warmup_learning_rate ,
                lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,
                name=UpperCamelCase__ ,
            )

    def snake_case_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0.0, UpperCAmelCase__ = 0.9, UpperCAmelCase__ = 0.999, UpperCAmelCase__ = 1e-8, UpperCAmelCase__ = None, UpperCAmelCase__ = None, UpperCAmelCase__ = 0.0, UpperCAmelCase__ = 1.0, UpperCAmelCase__ = None, ) -> Any:
    A_ = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=UpperCAmelCase__,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=UpperCAmelCase__,
    )
    if num_warmup_steps:
        A_ = WarmUp(
            initial_learning_rate=UpperCAmelCase__,
            decay_schedule_fn=UpperCAmelCase__,
            warmup_steps=UpperCAmelCase__,
        )
    if weight_decay_rate > 0.0:
        A_ = AdamWeightDecay(
            learning_rate=UpperCAmelCase__,
            weight_decay_rate=UpperCAmelCase__,
            beta_a=UpperCAmelCase__,
            beta_a=UpperCAmelCase__,
            epsilon=UpperCAmelCase__,
            clipnorm=UpperCAmelCase__,
            global_clipnorm=UpperCAmelCase__,
            exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""],
            include_in_weight_decay=UpperCAmelCase__,
        )
    else:
        A_ = tf.keras.optimizers.Adam(
            learning_rate=UpperCAmelCase__,
            beta_a=UpperCAmelCase__,
            beta_a=UpperCAmelCase__,
            epsilon=UpperCAmelCase__,
            clipnorm=UpperCAmelCase__,
            global_clipnorm=UpperCAmelCase__,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class A__ ( _snake_case ):
    def __init__( self , UpperCamelCase__ = 0.001 , UpperCamelCase__ = 0.9 , UpperCamelCase__ = 0.999 , UpperCamelCase__ = 1e-7 , UpperCamelCase__ = False , UpperCamelCase__ = 0.0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "AdamWeightDecay" , **UpperCamelCase__ , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
        A_ = weight_decay_rate
        A_ = include_in_weight_decay
        A_ = exclude_from_weight_decay

    @classmethod
    def snake_case_ ( cls , UpperCamelCase__ ) -> Optional[Any]:
        '''simple docstring'''
        A_ = {"""WarmUp""": WarmUp}
        return super(UpperCamelCase__ , cls ).from_config(UpperCamelCase__ , custom_objects=UpperCamelCase__ )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
        '''simple docstring'''
        super(UpperCamelCase__ , self )._prepare_local(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A_ = tf.constant(
            self.weight_decay_rate , name="""adam_weight_decay_rate""" )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
        '''simple docstring'''
        A_ = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] ,
                use_locking=self._use_locking ,
            )
        return tf.no_op()

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[int]:
        '''simple docstring'''
        A_ , A_ = list(zip(*UpperCamelCase__ ) )
        return super(UpperCamelCase__ , self ).apply_gradients(zip(UpperCamelCase__ , UpperCamelCase__ ) , name=UpperCamelCase__ , **UpperCamelCase__ )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
        '''simple docstring'''
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        A_ = apply_state or {}
        A_ = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            A_ = self._fallback_apply_state(UpperCamelCase__ , UpperCamelCase__ )
            A_ = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
        '''simple docstring'''
        A_ , A_ = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase__ )
        A_ = self._decay_weights_op(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        with tf.control_dependencies([decay] ):
            return super(UpperCamelCase__ , self )._resource_apply_dense(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Any:
        '''simple docstring'''
        A_ , A_ = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase__ )
        A_ = self._decay_weights_op(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        with tf.control_dependencies([decay] ):
            return super(UpperCamelCase__ , self )._resource_apply_sparse(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )

    def snake_case_ ( self ) -> List[str]:
        '''simple docstring'''
        A_ = super().get_config()
        config.update({"""weight_decay_rate""": self.weight_decay_rate} )
        return config

    def snake_case_ ( self , UpperCamelCase__ ) -> Any:
        '''simple docstring'''
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(UpperCamelCase__ , UpperCamelCase__ ) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(UpperCamelCase__ , UpperCamelCase__ ) is not None:
                    return False
        return True


class A__ ( _snake_case ):
    def __init__( self ) -> int:
        '''simple docstring'''
        A_ = []
        A_ = None

    @property
    def snake_case_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        if self._accum_steps is None:
            A_ = tf.Variable(
                tf.constant(0 , dtype=tf.intaa ) ,
                trainable=UpperCamelCase__ ,
                synchronization=tf.VariableSynchronization.ON_READ ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,
            )

        return self._accum_steps.value()

    @property
    def snake_case_ ( self ) -> Any:
        '''simple docstring'''
        if not self._gradients:
            raise ValueError("""The accumulator should be called first to initialize the gradients""" )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self , UpperCamelCase__ ) -> Dict:
        '''simple docstring'''
        if not self._gradients:
            A_ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(UpperCamelCase__ ) ,
                        trainable=UpperCamelCase__ ,
                        synchronization=tf.VariableSynchronization.ON_READ ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(UpperCamelCase__ ) != len(self._gradients ):
            raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase__ )}''' )

        for accum_gradient, gradient in zip(self._gradients , UpperCamelCase__ ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(UpperCamelCase__ )

        self._accum_steps.assign_add(1 )

    def snake_case_ ( self ) -> List[str]:
        '''simple docstring'''
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(UpperCamelCase__ ) )
101
'''simple docstring'''

def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00 ) -> int:
    A_ = n * (n + 1) * (2 * n + 1) / 6
    A_ = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )


if __name__ == "__main__":
    print(f"""{solution() = }""")
101
1
"""simple docstring""" def A_ ( _lowercase ): '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(_lowercase ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
66
'''simple docstring'''

from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]:
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""


def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]:
    '''simple docstring'''
    model.train()
    UpperCAmelCase_ = model(snake_case_ )
    UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(snake_case_ )


def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict:
    '''simple docstring'''
    set_seed(42 )
    UpperCAmelCase_ = RegressionModel()
    UpperCAmelCase_ = deepcopy(snake_case_ )
    UpperCAmelCase_ = RegressionDataset(length=80 )
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
        UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
        UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
    else:
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def lowerCAmelCase_ ( snake_case_ : Any ) -> int:
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
    # Use a single batch
    UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case_ ):
                step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        else:
            # Sync grads
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]


def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
    # Use a single batch
    UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(snake_case_ ):
                step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        else:
            # Sync grads
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]


def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]:
    '''simple docstring'''
    UpperCAmelCase_ = Accelerator(
        split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
    for iteration, batch in enumerate(snake_case_ ):
        UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(snake_case_ ):
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
        UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
    GradientState._reset_state()


def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]:
    '''simple docstring'''
    UpperCAmelCase_ = Accelerator(
        split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
    for iteration, batch in enumerate(snake_case_ ):
        UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
        # Gather the distributed inputs and targs for the base model
        UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
        UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(snake_case_ ):
            step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
        if accelerator.num_processes > 1:
            check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(13_37 + iteration )
    GradientState._reset_state()


def lowerCAmelCase_ ( ) -> List[Any]:
    '''simple docstring'''
    UpperCAmelCase_ = Accelerator()
    UpperCAmelCase_ = RegressionDataset(length=80 )
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
    UpperCAmelCase_ = RegressionDataset(length=96 )
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
    UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(snake_case_ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
        if iteration < len(snake_case_ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(snake_case_ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
                    if batch_num < len(snake_case_ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def lowerCAmelCase_ ( ) -> str:
    '''simple docstring'''
    UpperCAmelCase_ = Accelerator()
    UpperCAmelCase_ = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(snake_case_ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(snake_case_ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " ,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,
                    )
                test_gradient_accumulation(snake_case_ , snake_case_ )

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " ,
                "`split_batches=False`, `dispatch_batches=False`**" ,
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " ,
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,
                    )
                test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )


def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
    '''simple docstring'''
    main()


if __name__ == "__main__":
    main()
1
0
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
    __UpperCamelCase =int(SCREAMING_SNAKE_CASE__ )
    if n_element < 1:
        __UpperCamelCase =ValueError('a should be a positive number' )
        raise my_error
    __UpperCamelCase =[1]
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =(0, 0, 0)
    __UpperCamelCase =1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


if __name__ == "__main__":
    _A = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    _A = hamming(int(n))
    print('-----------------------------------------------------')
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
117
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Features ):
    __UpperCamelCase =np.inf

    def set_batch_size(SCREAMING_SNAKE_CASE__ : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            __UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            __UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and feature.dtype == "binary":
            __UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return None if batch_size is np.inf else batch_size


class UpperCAmelCase__ ( A_ ):
    """simple docstring"""

    def __init__( self , A_ , A_ = None , A_ = None , A_ = None , A_ = False , A_ = False , A_ = None , **A_ , ) -> Dict:
        super().__init__(
            A_ , split=A_ , features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , num_proc=A_ , **A_ , )
        __UpperCamelCase =path_or_paths if isinstance(A_ , A_ ) else {self.split: path_or_paths}
        __UpperCamelCase =_PACKAGED_DATASETS_MODULES['parquet'][1]
        __UpperCamelCase =Parquet(
            cache_dir=A_ , data_files=A_ , features=A_ , hash=A_ , **A_ , )

    def _a ( self ) -> List[Any]:
        # Build iterable dataset
        if self.streaming:
            __UpperCamelCase =self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            __UpperCamelCase =None
            __UpperCamelCase =None
            __UpperCamelCase =None
            __UpperCamelCase =None
            self.builder.download_and_prepare(
                download_config=A_ , download_mode=A_ , verification_mode=A_ , base_path=A_ , num_proc=self.num_proc , )
            __UpperCamelCase =self.builder.as_dataset(
                split=self.split , verification_mode=A_ , in_memory=self.keep_in_memory )
        return dataset


class UpperCAmelCase__ :
    """simple docstring"""

    def __init__( self , A_ , A_ , A_ = None , **A_ , ) -> List[Any]:
        __UpperCamelCase =dataset
        __UpperCamelCase =path_or_buf
        __UpperCamelCase =batch_size or get_writer_batch_size(dataset.features )
        __UpperCamelCase =parquet_writer_kwargs

    def _a ( self ) -> int:
        __UpperCamelCase =self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                __UpperCamelCase =self._write(file_obj=A_ , batch_size=A_ , **self.parquet_writer_kwargs )
        else:
            __UpperCamelCase =self._write(file_obj=self.path_or_buf , batch_size=A_ , **self.parquet_writer_kwargs )
        return written

    def _a ( self , A_ , A_ , **A_ ) -> int:
        __UpperCamelCase =0
        __UpperCamelCase =parquet_writer_kwargs.pop('path_or_buf' , A_ )
        __UpperCamelCase =self.dataset.features.arrow_schema
        __UpperCamelCase =pq.ParquetWriter(A_ , schema=A_ , **A_ )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , A_ ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            __UpperCamelCase =query_table(
                table=self.dataset._data , key=slice(A_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(A_ )
            written += batch.nbytes
        writer.close()
        return written
117
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowerCAmelCase__ = {
    """configuration_longformer""": [
        """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """LongformerConfig""",
        """LongformerOnnxConfig""",
    ],
    """tokenization_longformer""": ["""LongformerTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = ["""LongformerTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongformerForMaskedLM""",
        """LongformerForMultipleChoice""",
        """LongformerForQuestionAnswering""",
        """LongformerForSequenceClassification""",
        """LongformerForTokenClassification""",
        """LongformerModel""",
        """LongformerPreTrainedModel""",
        """LongformerSelfAttention""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__ = [
        """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFLongformerForMaskedLM""",
        """TFLongformerForMultipleChoice""",
        """TFLongformerForQuestionAnswering""",
        """TFLongformerForSequenceClassification""",
        """TFLongformerForTokenClassification""",
        """TFLongformerModel""",
        """TFLongformerPreTrainedModel""",
        """TFLongformerSelfAttention""",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
68
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


__snake_case = NewType("""DataClass""", Any)
__snake_case = NewType("""DataClassType""", Any)


def _lowercase ( UpperCamelCase_ ) -> int:
    '''simple docstring'''
    if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )


def _lowercase ( UpperCamelCase_ ) -> Callable[[str], Any]:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ = {str(UpperCamelCase_ ): choice for choice in choices}
    return lambda UpperCamelCase_ : str_to_choice.get(UpperCamelCase_ , UpperCamelCase_ )


def _lowercase ( *, UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = dataclasses.MISSING , UpperCamelCase_ = dataclasses.MISSING , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        SCREAMING_SNAKE_CASE__ = {}
    if aliases is not None:
        SCREAMING_SNAKE_CASE__ = aliases
    if help is not None:
        SCREAMING_SNAKE_CASE__ = help
    return dataclasses.field(metadata=UpperCamelCase_ , default=UpperCamelCase_ , default_factory=UpperCamelCase_ , **UpperCamelCase_ )


class lowercase__ ( _UpperCAmelCase ):
    A__ : Iterable[DataClassType]

    def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase_ : Optional[Any] ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            SCREAMING_SNAKE_CASE__ = ArgumentDefaultsHelpFormatter
        super().__init__(**UpperCAmelCase_ )
        if dataclasses.is_dataclass(UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE__ = [dataclass_types]
        SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(UpperCAmelCase_ )

    @staticmethod
    def A_ ( UpperCAmelCase_ : ArgumentParser , UpperCAmelCase_ : dataclasses.Field ):
        SCREAMING_SNAKE_CASE__ = F'--{field.name}'
        SCREAMING_SNAKE_CASE__ = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , UpperCAmelCase_ ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        SCREAMING_SNAKE_CASE__ = kwargs.pop('aliases' , [] )
        if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE__ = [aliases]
        SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(UpperCAmelCase_ , 'UnionType' ) and isinstance(UpperCAmelCase_ , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F' Problem encountered in field \'{field.name}\'.' )
            if type(UpperCAmelCase_ ) not in field.type.__args__:
                # filter `str` in Union
                SCREAMING_SNAKE_CASE__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                SCREAMING_SNAKE_CASE__ = (
                    field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
                )
                SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        SCREAMING_SNAKE_CASE__ = {}
        if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
            if origin_type is Literal:
                SCREAMING_SNAKE_CASE__ = field.type.__args__
            else:
                SCREAMING_SNAKE_CASE__ = [x.value for x in field.type]
            SCREAMING_SNAKE_CASE__ = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                SCREAMING_SNAKE_CASE__ = field.default
            else:
                SCREAMING_SNAKE_CASE__ = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            SCREAMING_SNAKE_CASE__ = copy(UpperCAmelCase_ )
            # Hack because type=bool in argparse does not behave as we want.
            SCREAMING_SNAKE_CASE__ = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                SCREAMING_SNAKE_CASE__ = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                SCREAMING_SNAKE_CASE__ = default
                # This tells argparse we accept 0 or 1 value after --field_name
                SCREAMING_SNAKE_CASE__ = '?'
                # This is the value that will get picked if we do --field_name (without value)
                SCREAMING_SNAKE_CASE__ = True
        elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
            SCREAMING_SNAKE_CASE__ = field.type.__args__[0]
            SCREAMING_SNAKE_CASE__ = '+'
            if field.default_factory is not dataclasses.MISSING:
                SCREAMING_SNAKE_CASE__ = field.default_factory()
            elif field.default is dataclasses.MISSING:
                SCREAMING_SNAKE_CASE__ = True
        else:
            SCREAMING_SNAKE_CASE__ = field.type
            if field.default is not dataclasses.MISSING:
                SCREAMING_SNAKE_CASE__ = field.default
            elif field.default_factory is not dataclasses.MISSING:
                SCREAMING_SNAKE_CASE__ = field.default_factory()
            else:
                SCREAMING_SNAKE_CASE__ = True
        parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            SCREAMING_SNAKE_CASE__ = False
            parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **UpperCAmelCase_ )

    def A_ ( self : List[Any] , UpperCAmelCase_ : DataClassType ):
        if hasattr(UpperCAmelCase_ , '_argument_group_name' ):
            SCREAMING_SNAKE_CASE__ = self.add_argument_group(dtype._argument_group_name )
        else:
            SCREAMING_SNAKE_CASE__ = self
        try:
            SCREAMING_SNAKE_CASE__ = get_type_hints(UpperCAmelCase_ )
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
                SCREAMING_SNAKE_CASE__ = '.'.join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(UpperCAmelCase_ ):
            if not field.init:
                continue
            SCREAMING_SNAKE_CASE__ = type_hints[field.name]
            self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )

    def A_ ( self : Dict , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            SCREAMING_SNAKE_CASE__ = []
            if args_filename:
                args_files.append(Path(UpperCAmelCase_ ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                SCREAMING_SNAKE_CASE__ = ArgumentParser()
                args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = args_file_parser.parse_known_args(args=UpperCAmelCase_ )
                SCREAMING_SNAKE_CASE__ = vars(UpperCAmelCase_ ).get(args_file_flag.lstrip('-' ) , UpperCAmelCase_ )
                if cmd_args_file_paths:
                    args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
            SCREAMING_SNAKE_CASE__ = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            SCREAMING_SNAKE_CASE__ = file_args + args if args is not None else file_args + sys.argv[1:]
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.parse_known_args(args=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE__ = []
        for dtype in self.dataclass_types:
            SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
            SCREAMING_SNAKE_CASE__ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
            for k in keys:
                delattr(UpperCAmelCase_ , UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
            outputs.append(UpperCAmelCase_ )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(UpperCAmelCase_ )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
            return (*outputs,)

    def A_ ( self : str , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False ):
        SCREAMING_SNAKE_CASE__ = set(args.keys() )
        SCREAMING_SNAKE_CASE__ = []
        for dtype in self.dataclass_types:
            SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
            SCREAMING_SNAKE_CASE__ = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
            outputs.append(UpperCAmelCase_ )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}' )
        return tuple(UpperCAmelCase_ )

    def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
        with open(Path(UpperCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
            SCREAMING_SNAKE_CASE__ = json.loads(open_json_file.read() )
        SCREAMING_SNAKE_CASE__ = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
        return tuple(UpperCAmelCase_ )

    def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
        SCREAMING_SNAKE_CASE__ = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
        return tuple(UpperCAmelCase_ )
176
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCAmelCase : Optional[Any] = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : Optional[Any] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
66
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class __lowercase ( pl.LightningModule ):
    """simple docstring"""

    def __init__( self , A ) -> Any:
        '''simple docstring'''
        super().__init__()
        lowerCamelCase = model
        lowerCamelCase = 2
        lowerCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def __A ( self ) -> int:
        '''simple docstring'''
        pass


def __lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : str ):
    '''simple docstring'''
    lowerCamelCase = LongformerModel.from_pretrained(lowerCamelCase__ )
    lowerCamelCase = LightningModel(lowerCamelCase__ )
    lowerCamelCase = torch.load(lowerCamelCase__ , map_location=torch.device("""cpu""" ) )
    lightning_model.load_state_dict(ckpt["""state_dict"""] )
    # init longformer question answering model
    lowerCamelCase = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase__ )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(lowerCamelCase__ )
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )


if __name__ == "__main__":
    UpperCAmelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    UpperCAmelCase : Optional[int] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
66
1
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> float:
    '''simple docstring'''
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
        raise Exception('Years to repay must be an integer > 0' )
    # Yearly rate is divided by 12 to get monthly rate
    SCREAMING_SNAKE_CASE__ = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    SCREAMING_SNAKE_CASE__ = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
176
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    parser.add_argument(
        """--original_config_file""",
        type=str,
        required=True,
        help="""The YAML config file corresponding to the original architecture.""",
    )
    parser.add_argument(
        """--num_in_channels""",
        default=None,
        type=int,
        help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
    )
    parser.add_argument(
        """--image_size""",
        default=5_12,
        type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
            """ Base. Use 768 for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--extract_ema""",
        action="""store_true""",
        help=(
            """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
            """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
            """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
        ),
    )
    parser.add_argument(
        """--upcast_attention""",
        action="""store_true""",
        help=(
            """Whether the attention computation should always be upcasted. This is necessary when running stable"""
            """ diffusion 2.1."""
        ),
    )
    parser.add_argument(
        """--from_safetensors""",
        action="""store_true""",
        help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
    )
    parser.add_argument(
        """--to_safetensors""",
        action="""store_true""",
        help="""Whether to store pipeline in safetensors format or not.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")

    def _lowercase ( UpperCamelCase_ ) -> Dict:
        '''simple docstring'''
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'could not parse string as bool {string}' )

    parser.add_argument(
        """--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
    )
    parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)

    __snake_case = parser.parse_args()

    __snake_case = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
176
1
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCAmelCase ( snake_case_ : List[Any] ) -> List[str]: """simple docstring""" _lowerCAmelCase = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2] _lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False _lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False _lowerCAmelCase = True if """large""" in model_name or """huge""" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: _lowerCAmelCase = [3, 3, 3, 3] _lowerCAmelCase = [5, 5, 5, 5] elif "fl4" in model_name: _lowerCAmelCase = [4, 4, 4, 4] _lowerCAmelCase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: _lowerCAmelCase = [3, 3, 3, 3] if "lrf" in model_name: _lowerCAmelCase = [3, 3, 3, 3] else: _lowerCAmelCase = [2, 2, 2, 2] if "tiny" in model_name: _lowerCAmelCase = 96 elif "small" in model_name: _lowerCAmelCase = 96 elif "base" in model_name: _lowerCAmelCase = 128 elif "large" in model_name: _lowerCAmelCase = 192 elif "xlarge" in model_name: _lowerCAmelCase = 256 elif "huge" in model_name: _lowerCAmelCase = 352 # set label information _lowerCAmelCase = """huggingface/label-files""" if "large" in model_name or "huge" in model_name: _lowerCAmelCase = """imagenet-22k-id2label.json""" else: _lowerCAmelCase = """imagenet-1k-id2label.json""" _lowerCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()} _lowerCAmelCase = {v: k for k, v in idalabel.items()} _lowerCAmelCase = FocalNetConfig( embed_dim=snake_case_ , depths=snake_case_ , focal_levels=snake_case_ , focal_windows=snake_case_ , use_conv_embed=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ , use_post_layernorm=snake_case_ , use_layerscale=snake_case_ , ) return config def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> Any: """simple docstring""" if "patch_embed.proj" in name: _lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _lowerCAmelCase = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: _lowerCAmelCase = """encoder.""" + name if "encoder.layers" in name: _lowerCAmelCase = name.replace("""encoder.layers""" , """encoder.stages""" ) if "downsample.proj" in name: _lowerCAmelCase = name.replace("""downsample.proj""" , """downsample.projection""" ) if "blocks" in name: _lowerCAmelCase = name.replace("""blocks""" , """layers""" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: _lowerCAmelCase = name.replace("""modulation.f""" , """modulation.projection_in""" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: _lowerCAmelCase = name.replace("""modulation.h""" , """modulation.projection_context""" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: _lowerCAmelCase = name.replace("""modulation.proj""" , """modulation.projection_out""" ) if name == "norm.weight": _lowerCAmelCase = """layernorm.weight""" if name == 
"norm.bias": _lowerCAmelCase = """layernorm.bias""" if "head" in name: _lowerCAmelCase = name.replace("""head""" , """classifier""" ) else: _lowerCAmelCase = """focalnet.""" + name return name def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : List[str]=False ) -> int: """simple docstring""" _lowerCAmelCase = { """focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""", """focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""", """focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""", """focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""", """focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""", """focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""", """focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""", """focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""", """focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""", """focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""", } # fmt: on _lowerCAmelCase = model_name_to_url[model_name] print("""Checkpoint URL: """ , snake_case_ ) _lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location="""cpu""" )["""model"""] # rename keys for key in state_dict.copy().keys(): _lowerCAmelCase = state_dict.pop(snake_case_ ) _lowerCAmelCase = val _lowerCAmelCase = get_focalnet_config(snake_case_ ) _lowerCAmelCase = FocalNetForImageClassification(snake_case_ ) model.eval() # load state dict model.load_state_dict(snake_case_ ) # verify conversion _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = BitImageProcessor( do_resize=snake_case_ , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case_ , crop_size=224 , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , ) _lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) _lowerCAmelCase = processor(images=snake_case_ , return_tensors="""pt""" ) _lowerCAmelCase = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _lowerCAmelCase = image_transforms(snake_case_ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , snake_case_ , atol=1e-4 ) _lowerCAmelCase = model(**snake_case_ ) _lowerCAmelCase = outputs.logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) print("""First values of logits:""" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": _lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": _lowerCAmelCase = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": _lowerCAmelCase = 
torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": _lowerCAmelCase = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": _lowerCAmelCase = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": _lowerCAmelCase = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
317
"""simple docstring""" import math def __UpperCAmelCase ( snake_case_ : int ) -> list[int]: """simple docstring""" _lowerCAmelCase = [] _lowerCAmelCase = 2 _lowerCAmelCase = int(math.sqrt(snake_case_ ) ) # Size of every segment _lowerCAmelCase = [True] * (end + 1) _lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(snake_case_ ) for i in range(start * start , end + 1 , snake_case_ ): _lowerCAmelCase = False start += 1 prime += in_prime _lowerCAmelCase = end + 1 _lowerCAmelCase = min(2 * end , snake_case_ ) while low <= n: _lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: _lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case_ , high + 1 , snake_case_ ): _lowerCAmelCase = False for j in range(len(snake_case_ ) ): if temp[j] is True: prime.append(j + low ) _lowerCAmelCase = high + 1 _lowerCAmelCase = min(high + end , snake_case_ ) return prime print(sieve(1_0**6))
317
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    lowercase_ = None

lowercase_ = logging.get_logger(__name__)

lowercase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

lowercase_ = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

lowercase_ = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

lowercase_ = """▁"""

# Segments (not really needed)
lowercase_ = 0
lowercase_ = 1
lowercase_ = 2
lowercase_ = 3
lowercase_ = 4


class __UpperCamelCase ( lowerCAmelCase__ ):
    """simple docstring"""

    lowerCAmelCase_ = VOCAB_FILES_NAMES
    lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ = '''left'''
    lowerCAmelCase_ = XLNetTokenizer

    def __init__( self : Tuple , _A : int=None , _A : Tuple=None , _A : Union[str, Any]=False , _A : Dict=True , _A : List[str]=False , _A : str="<s>" , _A : Optional[int]="</s>" , _A : Tuple="<unk>" , _A : int="<sep>" , _A : int="<pad>" , _A : Tuple="<cls>" , _A : Optional[int]="<mask>" , _A : str=["<eop>", "<eod>"] , **_A : Dict , ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
        super().__init__(
            vocab_file=_A , tokenizer_file=_A , do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , **_A , )
        __SCREAMING_SNAKE_CASE : Optional[int] = 3
        __SCREAMING_SNAKE_CASE : Dict = do_lower_case
        __SCREAMING_SNAKE_CASE : List[str] = remove_space
        __SCREAMING_SNAKE_CASE : Tuple = keep_accents
        __SCREAMING_SNAKE_CASE : List[Any] = vocab_file
        __SCREAMING_SNAKE_CASE : Dict = False if not self.vocab_file else True

    def UpperCAmelCase__ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
        __SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def UpperCAmelCase__ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : int = [self.sep_token_id]
        __SCREAMING_SNAKE_CASE : Dict = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def UpperCAmelCase__ ( self : List[str] , _A : str , _A : Optional[str] = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(_A ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __SCREAMING_SNAKE_CASE : str = os.path.join(
            _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
            copyfile(self.vocab_file , _A )
        return (out_vocab_file,)
303
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowercase_ = {
    """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
    """tokenization_electra""": ["""ElectraTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = ["""ElectraTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ElectraForCausalLM""",
        """ElectraForMaskedLM""",
        """ElectraForMultipleChoice""",
        """ElectraForPreTraining""",
        """ElectraForQuestionAnswering""",
        """ElectraForSequenceClassification""",
        """ElectraForTokenClassification""",
        """ElectraModel""",
        """ElectraPreTrainedModel""",
        """load_tf_weights_in_electra""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFElectraForMaskedLM""",
        """TFElectraForMultipleChoice""",
        """TFElectraForPreTraining""",
        """TFElectraForQuestionAnswering""",
        """TFElectraForSequenceClassification""",
        """TFElectraForTokenClassification""",
        """TFElectraModel""",
        """TFElectraPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """FlaxElectraForCausalLM""",
        """FlaxElectraForMaskedLM""",
        """FlaxElectraForMultipleChoice""",
        """FlaxElectraForPreTraining""",
        """FlaxElectraForQuestionAnswering""",
        """FlaxElectraForSequenceClassification""",
        """FlaxElectraForTokenClassification""",
        """FlaxElectraModel""",
        """FlaxElectraPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
303
1
"""simple docstring""" def lowerCamelCase__ ( a__ : str , a__ : str ) -> int: if len(a__ ) != len(a__ ): raise ValueError("""String lengths must match!""" ) UpperCamelCase_ = 0 for chara, chara in zip(a__ , a__ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
368
from __future__ import annotations

from typing import Any


class lowercase_ :
    def __init__( self , __UpperCamelCase = 6 ):
        """simple docstring"""
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        self.create_linked_list(__UpperCamelCase )

    def lowerCamelCase_ ( self , __UpperCamelCase ):
        """simple docstring"""
        UpperCamelCase_ = Node()
        UpperCamelCase_ = current_node
        UpperCamelCase_ = current_node
        UpperCamelCase_ = current_node
        for _ in range(1 , __UpperCamelCase ):
            UpperCamelCase_ = Node()
            UpperCamelCase_ = current_node
            UpperCamelCase_ = previous_node
            UpperCamelCase_ = current_node
        UpperCamelCase_ = self.front
        UpperCamelCase_ = previous_node

    def lowerCamelCase_ ( self ):
        """simple docstring"""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def lowerCamelCase_ ( self ):
        """simple docstring"""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def lowerCamelCase_ ( self , __UpperCamelCase ):
        """simple docstring"""
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            UpperCamelCase_ = self.rear.next
        if self.rear:
            UpperCamelCase_ = data

    def lowerCamelCase_ ( self ):
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            UpperCamelCase_ = self.front.data
            UpperCamelCase_ = None
            return data

        UpperCamelCase_ = self.front
        UpperCamelCase_ = old_front.next
        UpperCamelCase_ = old_front.data
        UpperCamelCase_ = None
        return data

    def lowerCamelCase_ ( self ):
        """simple docstring"""
        if self.is_empty():
            raise Exception("""Empty Queue""" )

    def lowerCamelCase_ ( self ):
        """simple docstring"""
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )


class lowercase_ :
    def __init__( self ):
        """simple docstring"""
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        UpperCamelCase_ = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
261
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_UpperCAmelCase = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
140
"""simple docstring""" def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = [] __lowerCAmelCase = 1 while len(_UpperCamelCase ) < 1e6: constant.append(str(_UpperCamelCase ) ) i += 1 __lowerCAmelCase = "".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9999] ) * int(constant[9_9999] ) * int(constant[99_9999] ) ) if __name__ == "__main__": print(solution())
57
0
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration a_ = pytest.mark.integration a_ = {"""comet"""} a_ = importlib.util.find_spec("""fairseq""") is not None a_ = {"""code_eval"""} a_ = os.name == """nt""" a_ = {"""bertscore""", """frugalscore""", """perplexity"""} a_ = importlib.util.find_spec("""transformers""") is not None def __lowercase ( snake_case_ : str ) ->Any: '''simple docstring''' @wraps(snake_case_ ) def wrapper(self : List[Any] ,snake_case_ : int ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self ,snake_case_ ) return wrapper def __lowercase ( snake_case_ : int ) ->str: '''simple docstring''' @wraps(snake_case_ ) def wrapper(self : List[Any] ,snake_case_ : List[str] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self ,snake_case_ ) return wrapper def __lowercase ( snake_case_ : Union[str, Any] ) ->Tuple: '''simple docstring''' @wraps(snake_case_ ) def wrapper(self : int ,snake_case_ : Dict ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self ,snake_case_ ) return wrapper def __lowercase ( ) ->Tuple: '''simple docstring''' __A : int = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @local class __snake_case ( parameterized.TestCase ): """simple docstring""" _lowerCamelCase = {} _lowerCamelCase = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : int = '''[...]''' __A : Any = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCamelCase ) ).module_path ) __A : str = datasets.load.import_main_class(metric_module.__name__ , dataset=__lowerCamelCase ) # check parameters __A : Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__lowerCamelCase , metric_module.__name__ ): with self.use_local_metrics(): try: __A : Tuple = doctest.testmod(__lowerCamelCase , verbose=__lowerCamelCase , raise_on_error=__lowerCamelCase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : Any = '''[...]''' __A : Union[str, Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCamelCase ) ).module_path ) # run doctest with self.use_local_metrics(): __A : Union[str, Any] = 
doctest.testmod(__lowerCamelCase , verbose=__lowerCamelCase , raise_on_error=__lowerCamelCase ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__lowerCamelCase ): yield else: yield @contextmanager def UpperCamelCase__( self ): '''simple docstring''' def load_local_metric(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ): return load_metric(os.path.join('''metrics''' , __lowerCamelCase ) , *__lowerCamelCase , **__lowerCamelCase ) with patch('''datasets.load_metric''' ) as mock_load_metric: __A : List[Any] = load_local_metric yield @classmethod def UpperCamelCase__( cls , __lowerCamelCase ): '''simple docstring''' def wrapper(__lowerCamelCase ): __A : Any = contextmanager(__lowerCamelCase ) __A : Optional[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def __lowercase ( snake_case_ : Tuple ) ->int: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' ) # handle pytest cli flags class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.0_3, 1.0_4] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: __A : List[str] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def __lowercase ( snake_case_ : List[str] ) ->Dict: '''simple docstring''' import torch def bert_cos_score_idf(snake_case_ : Union[str, Any] ,snake_case_ : List[str] ,*snake_case_ : List[str] ,**snake_case_ : Dict ): return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case_ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: __A : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def __lowercase ( snake_case_ : Optional[int] ) ->List[Any]: '''simple docstring''' def load_from_checkpoint(snake_case_ : str ): class __snake_case : """simple docstring""" def UpperCamelCase__( self , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' assert len(__lowerCamelCase ) == 2 __A : str = [0.1_9, 0.9_2] return scores, sum(__lowerCamelCase ) / len(__lowerCamelCase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: __A : int = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: __A : Dict = load_from_checkpoint yield def __lowercase ( ) ->str: '''simple docstring''' __A : Optional[Any] = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) ) __A : Optional[int] = '''ERROR''' __A : str = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(snake_case_ ,match=re.escape(snake_case_ ) ): metric.compute(predictions=[] 
,references=[] ,scheme=snake_case_ )
351
"""simple docstring""" a_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) a_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowercase ( snake_case_ : float ,snake_case_ : str ,snake_case_ : str ) ->float: '''simple docstring''' __A : Tuple = from_type.lower().strip('''s''' ) __A : Optional[int] = to_type.lower().strip('''s''' ) __A : List[str] = UNIT_SYMBOL.get(snake_case_ ,snake_case_ ) __A : Any = UNIT_SYMBOL.get(snake_case_ ,snake_case_ ) if from_sanitized not in METRIC_CONVERSION: __A : int = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(snake_case_ )}""" ) raise ValueError(snake_case_ ) if to_sanitized not in METRIC_CONVERSION: __A : str = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(snake_case_ )}""" ) raise ValueError(snake_case_ ) __A : Optional[Any] = METRIC_CONVERSION[from_sanitized] __A : Optional[int] = METRIC_CONVERSION[to_sanitized] __A : Union[str, Any] = 1 if from_exponent > to_exponent: __A : Dict = from_exponent - to_exponent else: __A : Union[str, Any] = -(to_exponent - from_exponent) return value * pow(10 ,snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
291
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-Excite runs a backward pass at inference time, and some of the
    # ops involved have no deterministic backward implementation, so determinism
    # is disabled for this class and restored afterwards.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: larger batch sizes make this test time out, so only small batches are checked
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # determinism is disabled here for the same backward-pass reason as above
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
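# --- Hedged usage sketch (not part of the test suite; this module uses relative
# imports, so run the equivalent from your own script) ---
# The checkpoint, prompt and token indices come from the slow test above; the
# step counts are arbitrary. `token_indices` [5, 7] point at "elephant" and
# "glasses" in the tokenized prompt, i.e. the tokens whose cross-attention maps
# Attend-and-Excite reinforces during denoising.
#
#     import torch
#     from diffusers import StableDiffusionAttendAndExcitePipeline
#
#     pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
#     ).to("cuda")
#     image = pipe(
#         prompt="a painting of an elephant with glasses",
#         token_indices=[5, 7],
#         guidance_scale=7.5,
#         num_inference_steps=50,
#         max_iter_to_alter=25,
#     ).images[0]
#     image.save("elephant_glasses.png")  # default output_type is "pil"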
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[Any] = tempfile.mkdtemp() # fmt: off __A : Optional[int] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on __A : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase)))) __A : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] __A : List[str] = {'unk_token': '<unk>'} __A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(_UpperCAmelCase) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(_UpperCAmelCase)) __A : str = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48145466, 0.4578275, 0.40821073], 'image_std': [0.26862954, 0.26130258, 0.27577711], } __A : Union[str, Any] = os.path.join(self.tmpdirname , _UpperCAmelCase) with open(self.image_processor_file , 'w' , encoding='utf-8') as fp: json.dump(_UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase): '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] __A : Optional[int] = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1)) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : str = self.get_tokenizer() __A : int = self.get_rust_tokenizer() __A : Any = self.get_image_processor() __A : Union[str, Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) processor_slow.save_pretrained(self.tmpdirname) __A : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase) __A : Union[str, Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) processor_fast.save_pretrained(self.tmpdirname) __A : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , 
_UpperCAmelCase) self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase) self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) __A : Dict = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)') __A : Any = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0) __A : Tuple = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , _UpperCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = self.get_image_processor() __A : List[Any] = self.get_tokenizer() __A : List[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) __A : List[str] = self.prepare_image_inputs() __A : Dict = image_processor(_UpperCAmelCase , return_tensors='np') __A : str = processor(images=_UpperCAmelCase , return_tensors='np') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : List[Any] = self.get_image_processor() __A : List[str] = self.get_tokenizer() __A : Optional[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) __A : Dict = 'lower newer' __A : List[Any] = processor(text=_UpperCAmelCase) __A : Optional[int] = tokenizer(_UpperCAmelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Dict = self.get_image_processor() __A : Optional[int] = self.get_tokenizer() __A : Optional[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) __A : int = 'lower newer' __A : Any = self.prepare_image_inputs() __A : Tuple = processor(text=_UpperCAmelCase , images=_UpperCAmelCase) self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values']) # test if it raises when no input is passed with pytest.raises(_UpperCAmelCase): processor() def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Any = self.get_image_processor() __A : str = self.get_tokenizer() __A : Optional[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) __A : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __A : str = processor.batch_decode(_UpperCAmelCase) __A : Optional[Any] = tokenizer.batch_decode(_UpperCAmelCase) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[int] = self.get_image_processor() __A : int = self.get_tokenizer() __A : Any = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) __A : Any = 'lower newer' __A : Any = 
self.prepare_image_inputs() __A : Any = processor(text=_UpperCAmelCase , images=_UpperCAmelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
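# --- Hedged usage sketch (appended for illustration; not part of the tests) ---
# What the processor under test wraps: text goes to the tokenizer, images to
# the image processor, and the two outputs are merged. The public checkpoint
# name is an assumption; any CLIP checkpoint shipping both components would do.
if __name__ == "__main__":
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']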
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __lowercase : int = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'RegNetConfig' # Base docstring __lowercase : Optional[Any] = 'facebook/regnet-y-040' __lowercase : Optional[int] = [1, 10_88, 7, 7] # Image classification docstring __lowercase : Tuple = 'facebook/regnet-y-040' __lowercase : int = 'tabby, tabby cat' __lowercase : int = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a = 3 , __a = 1 , __a = 1 , __a = "relu" , ): '''simple docstring''' super().__init__() __a : Optional[Any] = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) __a : List[Any] = nn.BatchNormad(lowercase_ ) __a : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity() def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : List[str] = self.convolution(lowercase_ ) __a : Dict = self.normalization(lowercase_ ) __a : Tuple = self.activation(lowercase_ ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a ): '''simple docstring''' super().__init__() __a : str = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __a : Dict = config.num_channels def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : List[Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __a : Optional[Any] = self.embedder(lowercase_ ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a = 2 ): '''simple docstring''' super().__init__() __a : Tuple = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ ) __a : Optional[int] = nn.BatchNormad(lowercase_ ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Optional[int] = self.convolution(lowercase_ ) __a : Optional[int] = self.normalization(lowercase_ ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) __a : List[Any] = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : List[str] = self.pooler(lowercase_ ) __a : Union[str, Any] = self.attention(lowercase_ ) __a : Dict = hidden_state * attention return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a = 1 ): '''simple docstring''' super().__init__() __a : Optional[Any] = in_channels != out_channels or stride != 1 __a : List[str] = max(1 , out_channels // config.groups_width ) __a : Tuple = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) __a : Optional[int] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) __a : Optional[Any] = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Dict = hidden_state __a : int = self.layer(lowercase_ ) __a : List[Any] = self.shortcut(lowercase_ ) hidden_state += residual __a : Tuple = self.activation(lowercase_ ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a = 1 ): '''simple docstring''' super().__init__() __a : int = in_channels != out_channels or stride != 1 __a : Any = max(1 , out_channels // config.groups_width ) __a : Dict = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity() ) __a : Union[str, Any] = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , ) __a : int = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = hidden_state __a : str = self.layer(lowercase_ ) __a : List[str] = self.shortcut(lowercase_ ) hidden_state += residual __a : Optional[int] = self.activation(lowercase_ ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a = 2 , __a = 2 , ): '''simple docstring''' super().__init__() __a : Optional[Any] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer __a : Tuple = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , 
*[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = self.layers(lowercase_ ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self , __a ): '''simple docstring''' super().__init__() __a : Dict = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __a : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) ) def __UpperCAmelCase ( self , __a , __a = False , __a = True ): '''simple docstring''' __a : int = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __a : Optional[Any] = hidden_states + (hidden_state,) __a : Union[str, Any] = stage_module(lowercase_ ) if output_hidden_states: __a : str = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ ) class __UpperCamelCase ( lowercase__ ): A_ = RegNetConfig A_ = """regnet""" A_ = """pixel_values""" A_ = True def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(lowercase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def __UpperCAmelCase ( self , __a , __a=False ): '''simple docstring''' if isinstance(lowercase_ , lowercase_ ): __a : Optional[Any] = value __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Dict = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, lowercase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __UpperCamelCase ( lowercase__ ): def __init__( self , __a ): '''simple docstring''' super().__init__(lowercase_ ) __a : int = config __a : List[str] = RegNetEmbeddings(lowercase_ ) __a : Tuple = RegNetEncoder(lowercase_ ) __a : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a , __a = None , __a = None ): '''simple docstring''' __a : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : Dict = return_dict if return_dict is not None else self.config.use_return_dict __a : int = self.embedder(lowercase_ ) __a : Dict = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) __a : Optional[int] = encoder_outputs[0] __a : Optional[int] = self.pooler(lowercase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowercase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __UpperCamelCase ( lowercase__ ): def __init__( self , __a ): '''simple docstring''' super().__init__(lowercase_ ) __a : Optional[int] = config.num_labels __a : Optional[int] = RegNetModel(lowercase_ ) # classification head __a : Union[str, Any] = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : int = return_dict if return_dict is not None else self.config.use_return_dict __a : List[str] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : Any = self.classifier(lowercase_ ) __a : Optional[Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = "single_label_classification" else: __a : int = "multi_label_classification" if self.config.problem_type == "regression": __a : Tuple = MSELoss() if self.num_labels == 1: __a : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(lowercase_ , lowercase_ ) elif self.config.problem_type == "single_label_classification": __a : str = CrossEntropyLoss() __a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif 
self.config.problem_type == "multi_label_classification": __a : List[str] = BCEWithLogitsLoss() __a : List[Any] = loss_fct(lowercase_ , lowercase_ ) if not return_dict: __a : Optional[int] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
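# --- Hedged usage sketch (this modeling file uses relative imports, so the
# example targets the published classes rather than running in place) ---
# The checkpoint name comes from this file's docstring constants; the all-black
# dummy image only demonstrates the I/O shapes, not a meaningful prediction.
#
#     import numpy as np
#     import torch
#     from PIL import Image
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#
#     image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits  # (1, num_labels)
#     print(model.config.id2label[logits.argmax(-1).item()])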
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() __a : int = module __a : List[Any] = nn.Sequential( nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , ) __a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__a ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' return self.module(__a , *__a , **__a ) + self.adapter(__a ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module A_ = "bigscience/bloom-1b7" # Constant values A_ = 2.109659552692574 A_ = "Hello my name is" A_ = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) A_ = 10 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = AutoTokenizer.from_pretrained(self.model_name ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_abit.config self.assertTrue(hasattr(__a , 'quantization_config' ) ) __a : Union[str, Any] = config.to_dict() __a : Tuple = config.to_diff_dict() __a : Tuple = config.to_json_string() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __a : List[Any] = self.model_fpaa.get_memory_footprint() __a : List[Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __a : Tuple = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__a , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BitsAndBytesConfig() __a : Tuple = True __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ) __a : List[Any] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BitsAndBytesConfig() with self.assertRaises(__a ): __a : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(__a ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__a ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with 
self.assertRaises(__a ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__a ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Optional[int] = self.model_fpaa.to(torch.floataa ) __a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __a : List[Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.half() # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.float() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : Any = 't5-small' __a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __a : int = AutoTokenizer.from_pretrained(cls.model_name ) __a : Union[str, Any] = 'Translate in German: Hello, my dog is cute' def __UpperCAmelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules __a : List[str] = None # test with `t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : Any = model.generate(**__a ) # test with `flan-t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[Any] = model.generate(**__a ) __a : Optional[int] = modules def __UpperCAmelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[str] = model.generate(**__a ) # test with `flan-t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : int = model.generate(**__a ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # model_name __a : List[Any] = 'bigscience/bloom-560m' __a : Union[str, Any] = 't5-small' # Different types of model __a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Sequence classification model __a : Dict = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__a , device_map='auto' ) # CausalLM model 
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Seq2seq model __a : Any = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __a : str = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__a , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 'facebook/opt-350m' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __a : Tuple = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __a : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__a ) ): __a : str = LoRALayer(module.q_proj , rank=16 ) __a : str = LoRALayer(module.k_proj , rank=16 ) __a : Optional[int] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __a : int = model.forward(**__a ) out.logits.norm().backward() for module in model.modules(): if isinstance(__a , __a ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__a , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "gpt2-xl" A_ = 3.3191854854152187
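# --- Hedged usage sketch (mirrors what the tests above exercise) ---
# 4-bit quantized loading is the feature under test; the flag name
# `load_in_4bit` is grounded in the pipeline kwargs used above, and the model
# name, prompt and token budget come from the test constants.
if __name__ == "__main__":
    model_4bit = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
    )
    tok = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    encoded = tok("Hello my name is", return_tensors="pt").to(0)
    output = model_4bit.generate(input_ids=encoded["input_ids"], max_new_tokens=10)
    print(tok.decode(output[0], skip_special_tokens=True))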
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find the first place a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find the first bare print statement (commented or quoted ones are ignored)."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
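# --- What the first pattern above flags, on three tiny examples ---
# (re-declared here for illustration; the leading space matters because the
# pattern requires whitespace immediately before `open`)
_OPEN_NO_ENCODING = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")

assert _OPEN_NO_ENCODING.search(" open('data.txt')")  # flagged: text mode without an encoding
assert not _OPEN_NO_ENCODING.search(" open('data.txt', encoding='utf-8')")  # ok: explicit encoding
assert not _OPEN_NO_ENCODING.search(" open('blob.bin', 'rb')")  # ok: binary mode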
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
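# --- Hedged migration note ---
# Per the deprecation message above, downstream code should switch to the
# top-level import path, e.g.:
#
#     from diffusers import StableDiffusionControlNetPipeline
#
# instead of importing from this `pipeline_stable_diffusion_controlnet` shim.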
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _A = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, 
FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
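# --- Design note (hedged) ---
# The `_LazyModule` wrapper above defers importing each heavy submodule until
# one of its attributes is first accessed, so a user-level import such as
#
#     from transformers import AlbertConfig, AlbertTokenizerFast
#
# only loads the corresponding `configuration_albert` / `tokenization_albert_fast`
# modules on demand and keeps `import transformers` itself cheap.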
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _A = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : Optional[str] = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "The column name of the images in the files."} ) UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the training data."} ) UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "A folder containing the validation data."} ) UpperCAmelCase__ : Optional[float] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) UpperCAmelCase__ : Optional[int] = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) UpperCAmelCase__ : Optional[int] = field( default=A_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _a ( self ) -> Optional[int]: __UpperCamelCase ={} if self.train_dir is not None: __UpperCamelCase =self.train_dir if self.validation_dir is not None: __UpperCamelCase =self.validation_dir __UpperCamelCase =data_files if data_files else None @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : str = field( default=A_ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) UpperCAmelCase__ : Optional[str] = field( default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) UpperCAmelCase__ : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "Name or path of preprocessor config."} ) UpperCAmelCase__ : bool = field( default=A_ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) UpperCAmelCase__ : float = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) UpperCAmelCase__ : bool = field( default=A_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : float = field( default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __UpperCamelCase =training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
__UpperCamelCase =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCamelCase =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. __UpperCamelCase =load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. __UpperCamelCase =None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0: __UpperCamelCase =ds['train'].train_test_split(data_args.train_val_split ) __UpperCamelCase =split['train'] __UpperCamelCase =split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __UpperCamelCase ={ 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: __UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: __UpperCamelCase =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: __UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: __UpperCamelCase =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ViTImageProcessor() # create model if model_args.model_name_or_path: __UpperCamelCase =ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) __UpperCamelCase =ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ ) if training_args.do_train: __UpperCamelCase =ds['train'].column_names else: __UpperCamelCase =ds['validation'].column_names if data_args.image_column_name is not None: __UpperCamelCase =data_args.image_column_name elif "image" in column_names: __UpperCamelCase ='image' elif "img" in column_names: __UpperCamelCase ='img' else: __UpperCamelCase =column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __UpperCamelCase =image_processor.size['shortest_edge'] else: __UpperCamelCase =(image_processor.size['height'], image_processor.size['width']) __UpperCamelCase =Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(SCREAMING_SNAKE_CASE__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(SCREAMING_SNAKE_CASE__ : Optional[Any] ): __UpperCamelCase =[transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __UpperCamelCase =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(SCREAMING_SNAKE_CASE__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __UpperCamelCase =( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ ) # Compute absolute learning rate __UpperCamelCase =( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __UpperCamelCase =training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer __UpperCamelCase =Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds['train'] if training_args.do_train else None , 
eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: __UpperCamelCase =None if training_args.resume_from_checkpoint is not None: __UpperCamelCase =training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCamelCase =last_checkpoint __UpperCamelCase =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __UpperCamelCase =trainer.evaluate() trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE__ ) # Write model card and (optionally) push to hub __UpperCamelCase ={ 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
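# --- Worked instance of the learning-rate scaling above (illustrative numbers) ---
# absolute_lr = base_learning_rate * total_train_batch_size / 256, where the
# total batch size folds in per-device batch size, gradient accumulation and
# world size. For example, with per-device batch 32, accumulation 2, world size 4:
#     total_train_batch_size = 32 * 2 * 4 = 256
#     absolute_lr = 1e-3 * 256 / 256 = 1e-3   (1e-3 is the dataclass default above)
# Doubling the effective batch to 512 would scale the absolute rate to 2e-3.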
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : int = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') UpperCAmelCase_ : Union[str, Any] = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(__a ): os.makedirs(__a ) UpperCAmelCase_ : Dict = model.state_dict() def to_tf_var_name(__lowerCamelCase ): for patt, repl in iter(__a ): UpperCAmelCase_ : Optional[int] = name.replace(__a, __a ) return f"""bert/{name}""" def create_tf_var(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : List[str] = tf.dtypes.as_dtype(tensor.dtype ) UpperCAmelCase_ : Optional[int] = tf.get_variable(dtype=__a, shape=tensor.shape, name=__a, initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCAmelCase_ : Any = to_tf_var_name(__a ) UpperCAmelCase_ : Any = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): UpperCAmelCase_ : List[str] = torch_tensor.T UpperCAmelCase_ : List[Any] = create_tf_var(tensor=__a, name=__a, session=__a ) tf.keras.backend.set_value(__a, __a ) UpperCAmelCase_ : Optional[int] = session.run(__a ) print(f"""Successfully created {tf_name}: {np.allclose(__a, __a )}""" ) UpperCAmelCase_ : str = tf.train.Saver(tf.trainable_variables() ) saver.save(__a, os.path.join(__a, model_name.replace("-", "_" ) + ".ckpt" ) ) def __a ( __lowerCamelCase=None ): UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("--model_name", type=__a, required=__a, help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir", type=__a, default=__a, required=__a, help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path", type=__a, required=__a, help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir", type=__a, required=__a, help="Directory in which to save tensorflow model" ) UpperCAmelCase_ : Dict = parser.parse_args(__a ) UpperCAmelCase_ : Dict = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, ) convert_pytorch_checkpoint_to_tf(model=__a, ckpt_dir=args.tf_cache_dir, model_name=args.model_name ) if __name__ == "__main__": main()
61
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int = 0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[str] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[Any] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_ , lowercase_)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_ , lowercase_)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
91
0
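A minimal round-trip sketch for the XOR cipher sample above. The module name `xor_cipher` is an assumption for illustration, not part of the original file; the class and method names are those defined in the sample.

# Hypothetical usage sketch; `xor_cipher` as a module name is an assumption.
from xor_cipher import XORCipher

crypt = XORCipher(key=67)
ciphertext = crypt.encrypt_string("hallo welt", 67)
# XOR with the same key is its own inverse, so decrypting restores the input
assert crypt.decrypt_string(ciphertext, 67) == "hallo welt"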
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging _lowercase: Dict = logging.get_logger(__name__) def a( A : List[str] , A : Union[str, Any] ) -> Optional[int]: """simple docstring""" a = nn.functional.normalize(A ) a = nn.functional.normalize(A ) return torch.mm(A , normalized_text_embeds.t() ) class _lowercase ( lowerCAmelCase ): """simple docstring""" __A = CLIPConfig __A = ["CLIPEncoderLayer"] def __init__(self , lowerCamelCase_ ): """simple docstring""" super().__init__(lowerCamelCase_ ) a = CLIPVisionModel(config.vision_config ) a = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCamelCase_ ) a = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCamelCase_ ) a = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCamelCase_ ) a = nn.Parameter(torch.ones(17 ) , requires_grad=lowerCamelCase_ ) a = nn.Parameter(torch.ones(3 ) , requires_grad=lowerCamelCase_ ) @torch.no_grad() def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" a = self.vision_model(lowerCamelCase_ )[1] # pooled_output a = self.visual_projection(lowerCamelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 a = cosine_distance(lowerCamelCase_ , self.special_care_embeds ).cpu().float().numpy() a = cosine_distance(lowerCamelCase_ , self.concept_embeds ).cpu().float().numpy() a = [] a = image_embeds.shape[0] for i in range(lowerCamelCase_ ): a = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images a = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): a = special_cos_dist[i][concept_idx] a = self.special_care_embeds_weights[concept_idx].item() a = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} ) a = 0.01 for concept_idx in range(len(cos_dist[0] ) ): a = cos_dist[i][concept_idx] a = self.concept_embeds_weights[concept_idx].item() a = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(lowerCamelCase_ ) result.append(lowerCamelCase_ ) a = [len(res["bad_concepts"] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" a = self.vision_model(lowerCamelCase_ )[1] # pooled_output a = self.visual_projection(lowerCamelCase_ ) a = cosine_distance(lowerCamelCase_ , self.special_care_embeds ) a = cosine_distance(lowerCamelCase_ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images a = 0.0 a = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) a = torch.any(special_scores > 0 , dim=1 ) a = special_care * 0.01 a = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) a = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) a = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
71
import os


def solution() -> int:
    """Find the greatest product of four adjacent numbers, in any direction, in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
71
1
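For reference, a self-contained sketch of the cosine-similarity helper used by the safety-checker sample above: normalize both embedding matrices row-wise, then take a matrix product so each entry is a cosine similarity. The shapes and random values are illustrative only.

import torch
import torch.nn as nn

def cosine_distance(image_embeds: torch.Tensor, text_embeds: torch.Tensor) -> torch.Tensor:
    normalized_image_embeds = nn.functional.normalize(image_embeds)  # rows scaled to unit length
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())

scores = cosine_distance(torch.randn(2, 512), torch.randn(3, 512))
print(scores.shape)  # torch.Size([2, 3]); each entry lies in [-1, 1]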
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowercase : """simple docstring""" def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Any=32 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : int=10 , lowerCAmelCase__ : str=[10, 20, 30, 40] , lowerCAmelCase__ : Tuple=[1, 1, 2, 1] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : int="relu" , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : Dict=None , ): SCREAMING_SNAKE_CASE_: Any = parent SCREAMING_SNAKE_CASE_: Optional[int] = batch_size SCREAMING_SNAKE_CASE_: str = image_size SCREAMING_SNAKE_CASE_: Union[str, Any] = num_channels SCREAMING_SNAKE_CASE_: List[str] = embeddings_size SCREAMING_SNAKE_CASE_: str = hidden_sizes SCREAMING_SNAKE_CASE_: Optional[Any] = depths SCREAMING_SNAKE_CASE_: Tuple = is_training SCREAMING_SNAKE_CASE_: Dict = use_labels SCREAMING_SNAKE_CASE_: List[str] = hidden_act SCREAMING_SNAKE_CASE_: Dict = num_labels SCREAMING_SNAKE_CASE_: str = scope SCREAMING_SNAKE_CASE_: Optional[int] = len(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) SCREAMING_SNAKE_CASE_: Optional[int] = None if self.use_labels: SCREAMING_SNAKE_CASE_: Dict = ids_tensor([self.batch_size] , self.num_labels) SCREAMING_SNAKE_CASE_: List[str] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : str): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Tuple = RegNetModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]): SCREAMING_SNAKE_CASE_: Dict = self.num_labels SCREAMING_SNAKE_CASE_: List[str] = RegNetForImageClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: Optional[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE_: Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () _UpperCAmelCase : int = ( {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase : Dict = False _UpperCAmelCase : str = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Union[str, Any] = RegNetModelTester(self) SCREAMING_SNAKE_CASE_: Dict = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Any): return @unittest.skip(reason="RegNet does not use inputs_embeds") def _SCREAMING_SNAKE_CASE ( self : Tuple): pass @unittest.skip(reason="RegNet does not support input and output embeddings") def _SCREAMING_SNAKE_CASE ( self : Any): pass def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_: List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_: Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_: Optional[Any] = model_class(config=lowerCAmelCase__) for name, module in model.named_modules(): if isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) self.assertTrue( torch.all(module.bias == 0) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): def check_hidden_states_output(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]): SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE_: List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)) SCREAMING_SNAKE_CASE_: Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else 
outputs.hidden_states SCREAMING_SNAKE_CASE_: List[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase__) , expected_num_stages + 1) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_: List[Any] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE_: Any = layer_type SCREAMING_SNAKE_CASE_: List[str] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE_: Dict = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]): for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_: Optional[Any] = RegNetModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) def A_ ( ): SCREAMING_SNAKE_CASE_: str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def _SCREAMING_SNAKE_CASE ( self : Any): return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: List[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = self.default_image_processor SCREAMING_SNAKE_CASE_: Any = prepare_img() SCREAMING_SNAKE_CASE_: Any = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_: List[Any] = model(**lowerCAmelCase__) # verify the logits SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = torch.tensor([-0.4180, -1.5051, -3.4836]).to(lowerCAmelCase__) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
13
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
13
1
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowercase_ = ["bert-base-uncased", "bert-base-cased"] lowercase_ = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class SCREAMING_SNAKE_CASE__ ( tf.keras.Model ): def __init__( self : List[Any] , _lowerCAmelCase : Dict ): super().__init__() __snake_case : List[Any] = tokenizer __snake_case : List[str] = AutoConfig.from_pretrained(_lowerCAmelCase ) __snake_case : Optional[Any] = TFAutoModel.from_config(_lowerCAmelCase ) def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] ): __snake_case : Dict = self.tokenizer(_lowerCAmelCase ) __snake_case : Tuple = self.bert(**_lowerCAmelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def snake_case__ ( self : List[str] ): super().setUp() __snake_case : Tuple = [ BertTokenizer.from_pretrained(_lowerCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false __snake_case : List[Any] = [TFBertTokenizer.from_pretrained(_lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(_lowerCAmelCase , use_fast_bert_tokenizer=_lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __snake_case : Any = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] __snake_case : List[str] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def snake_case__ ( self : str ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): __snake_case : Dict = tokenizer(_lowerCAmelCase , return_tensors="""tf""" , padding="""longest""" ) __snake_case : Optional[int] = tf_tokenizer(_lowerCAmelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def snake_case__ ( self : Optional[int] ): for tf_tokenizer in self.tf_tokenizers: __snake_case : Dict = tf_tokenizer(self.paired_sentences ) __snake_case : Dict = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def snake_case__ ( self : Dict ): for tf_tokenizer in self.tf_tokenizers: __snake_case : Union[str, Any] = tf.function(_lowerCAmelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): __snake_case : Optional[int] = tf.constant(_lowerCAmelCase ) __snake_case : List[Any] = compiled_tokenizer(_lowerCAmelCase ) 
__snake_case : Optional[int] = tf_tokenizer(_lowerCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def snake_case__ ( self : Optional[Any] ): for tf_tokenizer in self.tf_tokenizers: __snake_case : int = ModelToSave(tokenizer=_lowerCAmelCase ) __snake_case : Union[str, Any] = tf.convert_to_tensor(self.test_sentences ) __snake_case : Tuple = model(_lowerCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __snake_case : Optional[Any] = Path(_lowerCAmelCase ) / """saved.model""" model.save(_lowerCAmelCase ) __snake_case : List[str] = tf.keras.models.load_model(_lowerCAmelCase ) __snake_case : Union[str, Any] = loaded_model(_lowerCAmelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
20
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for `direction` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`; `length` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
20
1
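A non-interactive usage sketch for the bitonic sort sample above, assuming its functions are in scope. Bitonic sort only works when the number of elements is a power of two.

data = [12, 42, -21, 17, 23, 18, 9, 0]  # 8 elements (a power of two)
bitonic_sort(data, 0, len(data), 1)     # direction 1 = ascending
print(data)  # [-21, 0, 9, 12, 17, 18, 23, 42]
bitonic_merge(data, 0, len(data), 0)    # a sorted array is bitonic, so one merge reverses it
print(data)  # [42, 23, 18, 17, 12, 9, 0, -21]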
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A : str = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
274
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 is highest); each level holds at most 100 items."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the oldest element of the highest non-empty priority queue."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The smallest element has the highest priority; holds at most 100 items."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
274
1
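A small usage sketch for the two priority queues above, assuming they are in scope under the names FixedPriorityQueue and ElementPriorityQueue as defined in the sample.

fpq = FixedPriorityQueue()
fpq.enqueue(0, 10)   # priority 0 is served first
fpq.enqueue(2, 1)
fpq.enqueue(1, 70)
print(fpq.dequeue())  # 10  (priority 0)
print(fpq.dequeue())  # 70  (priority 1)
print(fpq.dequeue())  # 1   (priority 2)

epq = ElementPriorityQueue()
for value in (70, 4, 128):
    epq.enqueue(value)
print(epq.dequeue())  # 4 -- the smallest element has the highest priority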
"""simple docstring""" import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='''%(message)s''') def _snake_case ( _snake_case : np.ndarray ) -> np.ndarray: '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray , _snake_case : int ) -> np.ndarray: '''simple docstring''' _A = np.nan for i in range(_snake_case ): _A = features[:, labels == i] _A = data.mean(1 ) # Centralize the data of class i _A = data - column_reshape(_snake_case ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_snake_case , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) _A = np.dot(_snake_case , centered_data.T ) return covariance_sum / features.shape[1] def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray , _snake_case : int ) -> np.ndarray: '''simple docstring''' _A = features.mean(1 ) _A = np.nan for i in range(_snake_case ): _A = features[:, labels == i] _A = data.shape[1] _A = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_snake_case ) - column_reshape(_snake_case ) , (column_reshape(_snake_case ) - column_reshape(_snake_case )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) _A = device_data * np.dot( column_reshape(_snake_case ) - column_reshape(_snake_case ) , (column_reshape(_snake_case ) - column_reshape(_snake_case )).T , ) return covariance_sum / features.shape[1] def _snake_case ( _snake_case : np.ndarray , _snake_case : int ) -> np.ndarray: '''simple docstring''' if features.any(): _A = features.mean(1 ) # Center the dataset _A = features - np.reshape(_snake_case , (data_mean.size, 1) ) _A = np.dot(_snake_case , centered_data.T ) / features.shape[1] _A , _A = np.linalg.eigh(_snake_case ) # Take all the columns in the reverse order (-1), and then takes only the first _A = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space _A = np.dot(filtered_eigenvectors.T , _snake_case ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_snake_case ) logging.error('Dataset empty' ) raise AssertionError def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray , _snake_case : int , _snake_case : int ) -> np.ndarray: '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: _A , _A = eigh( covariance_between_classes(_snake_case , _snake_case , _snake_case ) , covariance_within_classes(_snake_case , _snake_case , _snake_case ) , ) _A = eigenvectors[:, ::-1][:, :dimensions] _A , _A , _A = np.linalg.svd(_snake_case ) _A = svd_matrix[:, 0:dimensions] _A = np.dot(filtered_svd_matrix.T , _snake_case ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_snake_case ) logging.error('Dataset empty' ) raise AssertionError def _snake_case ( ) -> None: '''simple docstring''' _A = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) _A = np.array([0, 0, 0, 1, 1] ) _A = 2 _A = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_snake_case ) as error_info: _A = linear_discriminant_analysis( _snake_case , _snake_case , _snake_case , _snake_case ) if isinstance(_snake_case , np.ndarray ): raise 
AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def _snake_case ( ) -> None: '''simple docstring''' _A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) _A = 2 _A = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] ) with pytest.raises(_snake_case ) as error_info: _A = principal_component_analysis(_snake_case , _snake_case ) if not np.allclose(_snake_case , _snake_case ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
271
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1_000, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } a = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1_000, '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } a = { '''sample_size''': 256, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } a = { '''num_train_timesteps''': 40, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } a = { '''num_train_timesteps''': 201, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } a = { '''num_train_timesteps''': 151, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' if isinstance(_snake_case , _snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def _snake_case ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any]=False ) -> List[str]: '''simple docstring''' _A = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] _A = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] _A = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] _A = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] _A = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] _A = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] _A = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] _A = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] _A = checkpoint[F'''{old_prefix}.out_layers.3.weight'''] _A = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: _A = checkpoint[F'''{old_prefix}.skip_connection.weight'''] _A = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def _snake_case ( _snake_case : List[Any] , _snake_case : int , _snake_case : Optional[int] , _snake_case : List[Any] , 
_snake_case : int=None ) -> Optional[int]: '''simple docstring''' _A , _A , _A = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) _A , _A , _A = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) _A = checkpoint[F'''{old_prefix}.norm.weight'''] _A = checkpoint[F'''{old_prefix}.norm.bias'''] _A = weight_q.squeeze(-1 ).squeeze(-1 ) _A = bias_q.squeeze(-1 ).squeeze(-1 ) _A = weight_k.squeeze(-1 ).squeeze(-1 ) _A = bias_k.squeeze(-1 ).squeeze(-1 ) _A = weight_v.squeeze(-1 ).squeeze(-1 ) _A = bias_v.squeeze(-1 ).squeeze(-1 ) _A = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) _A = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def _snake_case ( _snake_case : str , _snake_case : Any ) -> str: '''simple docstring''' _A = torch.load(_snake_case , map_location='cpu' ) _A = {} _A = checkpoint['time_embed.0.weight'] _A = checkpoint['time_embed.0.bias'] _A = checkpoint['time_embed.2.weight'] _A = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _A = checkpoint['label_emb.weight'] _A = checkpoint['input_blocks.0.0.weight'] _A = checkpoint['input_blocks.0.0.bias'] _A = unet_config['down_block_types'] _A = unet_config['layers_per_block'] _A = unet_config['attention_head_dim'] _A = unet_config['block_out_channels'] _A = 1 _A = channels_list[0] for i, layer_type in enumerate(_snake_case ): _A = channels_list[i] _A = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_snake_case ): _A = F'''down_blocks.{i}.resnets.{j}''' _A = F'''input_blocks.{current_layer}.0''' _A = True if j == 0 and downsample_block_has_skip else False _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_snake_case ): _A = F'''down_blocks.{i}.resnets.{j}''' _A = F'''input_blocks.{current_layer}.0''' _A = True if j == 0 and downsample_block_has_skip else False _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case ) _A = F'''down_blocks.{i}.attentions.{j}''' _A = F'''input_blocks.{current_layer}.1''' _A = convert_attention( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) current_layer += 1 if i != len(_snake_case ) - 1: _A = F'''down_blocks.{i}.downsamplers.0''' _A = F'''input_blocks.{current_layer}.0''' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case ) current_layer += 1 _A = current_channels # hardcoded the mid-block for now _A = 'mid_block.resnets.0' _A = 'middle_block.0' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case ) _A = 'mid_block.attentions.0' _A = 'middle_block.1' _A = convert_attention(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) _A = 'mid_block.resnets.1' _A = 'middle_block.2' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case ) _A = 0 _A = unet_config['up_block_types'] for i, layer_type in enumerate(_snake_case ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _A = F'''up_blocks.{i}.resnets.{j}''' _A = F'''output_blocks.{current_layer}.0''' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case ) current_layer += 1 if i != len(_snake_case ) - 1: _A = F'''up_blocks.{i}.upsamplers.0''' _A = F'''output_blocks.{current_layer-1}.1''' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case ) 
elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _A = F'''up_blocks.{i}.resnets.{j}''' _A = F'''output_blocks.{current_layer}.0''' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case ) _A = F'''up_blocks.{i}.attentions.{j}''' _A = F'''output_blocks.{current_layer}.1''' _A = convert_attention( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) current_layer += 1 if i != len(_snake_case ) - 1: _A = F'''up_blocks.{i}.upsamplers.0''' _A = F'''output_blocks.{current_layer-1}.2''' _A = convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case ) _A = checkpoint['out.0.weight'] _A = checkpoint['out.0.bias'] _A = checkpoint['out.2.weight'] _A = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') a = parser.parse_args() a = strabool(args.class_cond) a = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: a = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: a = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: a = None a = con_pt_to_diffuser(args.unet_path, unet_config) a = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: a = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: a = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') a = CMStochasticIterativeScheduler(**scheduler_config) a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
271
1
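A toy check of the PCA routine in the sample above, assuming its `principal_component_analysis` function is in scope: project three-dimensional points that lie near a line down to one dimension. The numbers are illustrative only.

import numpy as np

features = np.array(
    [[1.0, 2.0, 3.0, 4.0],
     [2.0, 4.1, 5.9, 8.0],
     [3.0, 6.0, 9.1, 12.0]]
)  # shape (n_features, n_samples)
projected = principal_component_analysis(features, 1)
print(projected.shape)  # (1, 4): one principal component, four samples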
import os


def solution() -> str:
    """Return the first ten digits of the sum of the one-hundred 50-digit numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
343
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ 
, pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
343
1
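A self-contained sketch of the `get_pairs` helper used by the byte-level BPE tokenizer sample above: it collects the set of adjacent symbol pairs in a word, which BPE then repeatedly merges starting from the highest-ranked pair.

def get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(("l", "o", "w", "e", "r")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}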
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter a__ : Union[str, Any] = '''Create a default config file for Accelerate with only a few flags set.''' def UpperCAmelCase__ (lowerCAmelCase_="no" , lowerCAmelCase_ = default_json_config_file , lowerCAmelCase_ = False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Path(lowerCAmelCase_ ) path.parent.mkdir(parents=lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False __SCREAMING_SNAKE_CASE = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" ) __SCREAMING_SNAKE_CASE = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): __SCREAMING_SNAKE_CASE = torch.cuda.device_count() __SCREAMING_SNAKE_CASE = num_gpus __SCREAMING_SNAKE_CASE = False if num_gpus > 1: __SCREAMING_SNAKE_CASE = "MULTI_GPU" else: __SCREAMING_SNAKE_CASE = "NO" elif is_xpu_available() and use_xpu: __SCREAMING_SNAKE_CASE = torch.xpu.device_count() __SCREAMING_SNAKE_CASE = num_xpus __SCREAMING_SNAKE_CASE = False if num_xpus > 1: __SCREAMING_SNAKE_CASE = "MULTI_XPU" else: __SCREAMING_SNAKE_CASE = "NO" elif is_npu_available(): __SCREAMING_SNAKE_CASE = torch.npu.device_count() __SCREAMING_SNAKE_CASE = num_npus __SCREAMING_SNAKE_CASE = False if num_npus > 1: __SCREAMING_SNAKE_CASE = "MULTI_NPU" else: __SCREAMING_SNAKE_CASE = "NO" else: __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = "NO" __SCREAMING_SNAKE_CASE = ClusterConfig(**lowerCAmelCase_ ) config.to_json_file(lowerCAmelCase_ ) return path def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = parser.add_parser("default" , parents=lowerCAmelCase_ , help=lowerCAmelCase_ , formatter_class=lowerCAmelCase_ ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowerCAmelCase_ , help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. 
" "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
195
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a__ : List[str] = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys a__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
195
1
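The accelerate sample above builds a default JSON config after probing the available devices. A minimal sketch of just the file-handling flow, assuming a hypothetical helper name and a plain JSON target, and skipping the CUDA/XPU/NPU probing:

import json
from pathlib import Path

def write_minimal_config(mixed_precision: str = "no", save_location: str = "default_config.json") -> bool:
    # Create parent directories; refuse to overwrite an existing config.
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(f"Configuration already exists at {save_location}, will not override.")
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ("no", "fp16", "bf16", "fp8"):
        raise ValueError(f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
    config = {"compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision}
    path.write_text(json.dumps(config, indent=2))
    return True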
"""simple docstring""" def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> int: '''simple docstring''' return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def lowercase_ ( _lowerCamelCase: dict[int, list[int]] ) -> list[tuple[int, int]]: '''simple docstring''' __lowerCamelCase : Dict = 0 __lowerCamelCase : List[Any] = len(_lowerCamelCase ) # No of vertices in graph __lowerCamelCase : Any = [0] * n __lowerCamelCase : Union[str, Any] = [False] * n def dfs(_lowerCamelCase: List[Any] , _lowerCamelCase: Tuple , _lowerCamelCase: Dict , _lowerCamelCase: str ): __lowerCamelCase : str = True __lowerCamelCase : Dict = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , id_ ) __lowerCamelCase : List[Any] = min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge __lowerCamelCase : Dict = min(low[at] , low[to] ) __lowerCamelCase : list[tuple[int, int]] = [] for i in range(_lowerCamelCase ): if not visited[i]: dfs(_lowerCamelCase , -1 , _lowerCamelCase , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
135
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { '''configuration_layoutlmv3''': [ '''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv3Config''', '''LayoutLMv3OnnxConfig''', ], '''processing_layoutlmv3''': ['''LayoutLMv3Processor'''], '''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''LayoutLMv3TokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv3ForQuestionAnswering''', '''LayoutLMv3ForSequenceClassification''', '''LayoutLMv3ForTokenClassification''', '''LayoutLMv3Model''', '''LayoutLMv3PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLayoutLMv3ForQuestionAnswering''', '''TFLayoutLMv3ForSequenceClassification''', '''TFLayoutLMv3ForTokenClassification''', '''TFLayoutLMv3Model''', '''TFLayoutLMv3PreTrainedModel''', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''LayoutLMv3FeatureExtractor'''] __A = ['''LayoutLMv3ImageProcessor'''] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
135
1
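The graph sample above finds bridges with a low-link DFS, but the mangled identifiers obscure it. The same algorithm with readable names (depth-based low-link; an edge is a bridge when the child's subtree has no back edge to the parent or above):

def compute_bridges(graph: dict) -> list:
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    bridges: list = []

    def dfs(at: int, parent: int, id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:  # no back edge from `to`'s subtree reaches `at` or above
                    bridges.append((at, to) if at < to else (to, at))
            else:
                low[at] = min(low[at], low[to])

    for i in range(n):
        if not visited[i]:
            dfs(i, -1, 0)
    return bridges

# Example: both edges of the path 0-1-2 are bridges.
assert compute_bridges({0: [1], 1: [0, 2], 2: [1]}) == [(1, 2), (0, 1)]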
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> str: if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowercase__: Dict = [] for i in range(__UpperCAmelCase ): lowercase__: Any = i / num_diffusion_timesteps lowercase__: str = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :List[Any] = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase :Any = 2 @register_to_config def __init__( self , _UpperCAmelCase = 1000 , _UpperCAmelCase = 0.00_085 , _UpperCAmelCase = 0.012 , _UpperCAmelCase = "linear" , _UpperCAmelCase = None , _UpperCAmelCase = "epsilon" , _UpperCAmelCase = "linspace" , _UpperCAmelCase = 0 , ): if trained_betas is not None: lowercase__: int = torch.tensor(_UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": lowercase__: str = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowercase__: List[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowercase__: Union[str, Any] = betas_for_alpha_bar(_UpperCAmelCase ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) lowercase__: List[Any] = 1.0 - self.betas lowercase__: Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=None ): if schedule_timesteps is None: lowercase__: Union[str, Any] = self.timesteps lowercase__: Tuple = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: lowercase__: Tuple = 1 if len(_UpperCAmelCase ) > 1 else 0 else: lowercase__: Dict = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep lowercase__: str = self._index_counter[timestep_int] return indices[pos].item() @property def _snake_case ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , ): lowercase__: Optional[int] = self.index_for_timestep(_UpperCAmelCase ) if self.state_in_first_order: lowercase__: Tuple = self.sigmas[step_index] else: lowercase__: List[Any] = self.sigmas_interpol[step_index] lowercase__: Dict = sample / ((sigma**2 + 1) ** 0.5) return sample def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ): lowercase__: Tuple = num_inference_steps lowercase__: Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowercase__: int = np.linspace(0 , num_train_timesteps - 1 , _UpperCAmelCase , dtype=_UpperCAmelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": lowercase__: List[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase__: List[Any] = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCAmelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowercase__: int = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase__: str = (np.arange(_UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(_UpperCAmelCase ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) lowercase__: int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowercase__: Optional[Any] = torch.from_numpy(np.log(_UpperCAmelCase ) ).to(_UpperCAmelCase ) lowercase__: Dict = np.interp(_UpperCAmelCase , np.arange(0 , len(_UpperCAmelCase ) ) , _UpperCAmelCase ) lowercase__: Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowercase__: str = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase ) # interpolate sigmas lowercase__: Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() lowercase__: Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) lowercase__: Optional[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_UpperCAmelCase ).startswith('''mps''' ): # mps does not support float64 lowercase__: List[Any] = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase , dtype=torch.floataa ) else: lowercase__: Any = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase ) # interpolate timesteps lowercase__: List[str] = self.sigma_to_t(_UpperCAmelCase ).to(_UpperCAmelCase , dtype=timesteps.dtype ) lowercase__: List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() lowercase__: List[str] = torch.cat([timesteps[:1], interleaved_timesteps] ) lowercase__: int = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowercase__: Optional[Any] = defaultdict(_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): # get log sigma lowercase__: Optional[Any] = sigma.log() # get distribution lowercase__: List[Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range lowercase__: Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) lowercase__: Tuple = low_idx + 1 lowercase__: Optional[int] = self.log_sigmas[low_idx] lowercase__: List[str] = self.log_sigmas[high_idx] # interpolate sigmas lowercase__: List[str] = (low - log_sigma) / (low - high) lowercase__: Tuple = w.clamp(0 , 1 ) # transform interpolation to time range lowercase__: Any = (1 - w) * low_idx + w * high_idx lowercase__: Tuple = t.view(sigma.shape ) return t @property def _snake_case ( self ): return self.sample is None def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ): lowercase__: Optional[Any] = self.index_for_timestep(_UpperCAmelCase ) # advance index counter by 1 lowercase__: List[str] = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowercase__: List[str] = self.sigmas[step_index] lowercase__: str = self.sigmas_interpol[step_index + 1] lowercase__: Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowercase__: Union[str, Any] = self.sigmas[step_index - 1] lowercase__: Optional[Any] = self.sigmas_interpol[step_index] lowercase__: List[str] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowercase__: Optional[Any] = 0 lowercase__: List[str] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowercase__: Any = sigma_hat if self.state_in_first_order else sigma_interpol lowercase__: Union[str, Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowercase__: Dict = sigma_hat if self.state_in_first_order else sigma_interpol lowercase__: Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowercase__: str = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowercase__: Optional[int] = sigma_interpol - sigma_hat # store for 2nd order step lowercase__: List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowercase__: str = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep lowercase__: Union[str, Any] = sigma_next - sigma_hat lowercase__: Optional[Any] = self.sample lowercase__: Optional[Any] = None lowercase__: Tuple = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples lowercase__: Dict = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCAmelCase ): # mps does not support float64 lowercase__: Any = self.timesteps.to(original_samples.device , dtype=torch.floataa ) lowercase__: Optional[int] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: lowercase__: int = self.timesteps.to(original_samples.device ) lowercase__: Any = timesteps.to(original_samples.device ) lowercase__: List[str] = [self.index_for_timestep(_UpperCAmelCase , _UpperCAmelCase ) for t in timesteps] lowercase__: int = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowercase__: str = sigma.unsqueeze(-1 ) lowercase__: List[Any] = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
2
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __A = logging.get_logger(__name__) __A = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = "bloom" _UpperCAmelCase :List[str] = ["past_key_values"] _UpperCAmelCase :Optional[Any] = { "num_hidden_layers": "n_layer", "num_attention_heads": "n_head", } def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ): lowercase__: Any = vocab_size # Backward compatibility with n_embed kwarg lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase ) lowercase__: int = hidden_size if n_embed is None else n_embed lowercase__: int = n_layer lowercase__: int = n_head lowercase__: Optional[Any] = layer_norm_epsilon lowercase__: int = initializer_range lowercase__: List[Any] = use_cache lowercase__: str = pretraining_tp lowercase__: Tuple = apply_residual_connection_post_layernorm lowercase__: int = hidden_dropout lowercase__: Optional[Any] = attention_dropout lowercase__: int = bos_token_id lowercase__: Union[str, Any] = eos_token_id lowercase__: Any = slow_but_exact super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :int = version.parse("1.12" ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ): super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ): # TODO: how to do that better? lowercase__: Any = 0 @property def _snake_case ( self ): lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase ) lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__: str = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _snake_case ( self ): return self._config.n_layer @property def _snake_case ( self ): return self._config.n_head @property def _snake_case ( self ): return 1e-3 def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ): lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs( _UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__: Tuple = seqlen + 2 lowercase__: str = self._config.hidden_size // self.num_attention_heads lowercase__: Optional[int] = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase__: Union[str, Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase__: str = [ (torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers ) ] lowercase__: Tuple = common_inputs['''attention_mask'''] if self.use_past: lowercase__: int = ordered_inputs['''attention_mask'''].dtype lowercase__: List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _snake_case ( self ): return 13
2
1
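The scheduler sample above derives its betas from a cosine alpha-bar curve. A dependency-light sketch of that schedule (plain math instead of torch; the max_beta clipping matches the source):

import math

def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999) -> list:
    # beta_i comes from the ratio of consecutive cosine alpha_bar values,
    # clipped at max_beta so the final steps stay numerically safe.
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

# Betas grow toward max_beta at the end of the schedule.
b = betas_for_alpha_bar(1000)
assert 0 < b[0] < b[-1] <= 0.999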
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' if "cls_token" in name: UpperCAmelCase__ = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: UpperCAmelCase__ = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: UpperCAmelCase__ = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: UpperCAmelCase__ = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: UpperCAmelCase__ = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: UpperCAmelCase__ = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: UpperCAmelCase__ = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: UpperCAmelCase__ = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: UpperCAmelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: UpperCAmelCase__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: UpperCAmelCase__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCAmelCase__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: UpperCAmelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCAmelCase__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: UpperCAmelCase__ = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: UpperCAmelCase__ = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: UpperCAmelCase__ = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: UpperCAmelCase__ = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: UpperCAmelCase__ = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "qkv" in key: UpperCAmelCase__ = key.split(""".""" ) UpperCAmelCase__ = int(key_split[1] ) if "decoder_blocks" in key: UpperCAmelCase__ = config.decoder_hidden_size UpperCAmelCase__ = """decoder.decoder_layers.""" if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] elif "bias" in key: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = config.hidden_size UpperCAmelCase__ = """vit.encoder.layer.""" if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] elif "bias" in key: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = val return orig_state_dict def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' 
UpperCAmelCase__ = ViTMAEConfig() if "large" in checkpoint_url: UpperCAmelCase__ = 1024 UpperCAmelCase__ = 4096 UpperCAmelCase__ = 24 UpperCAmelCase__ = 16 elif "huge" in checkpoint_url: UpperCAmelCase__ = 14 UpperCAmelCase__ = 1280 UpperCAmelCase__ = 5120 UpperCAmelCase__ = 32 UpperCAmelCase__ = 16 UpperCAmelCase__ = ViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""] UpperCAmelCase__ = ViTMAEImageProcessor(size=config.image_size ) UpperCAmelCase__ = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() UpperCAmelCase__ = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" UpperCAmelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) UpperCAmelCase__ = ViTMAEImageProcessor(size=config.image_size ) UpperCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = outputs.logits if "large" in checkpoint_url: UpperCAmelCase__ = torch.tensor( [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] ) elif "huge" in checkpoint_url: UpperCAmelCase__ = torch.tensor( [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] ) else: UpperCAmelCase__ = torch.tensor( [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase_ = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
346
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _UpperCamelCase ( ): '''simple docstring''' import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase__ = """__test_patch_submodule_mock__""" with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _UpperCamelCase ( ): '''simple docstring''' assert _test_patching.open is open UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__""" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_mock__""" with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ): pass def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__""" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _UpperCamelCase ( ): '''simple 
docstring''' UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__""" UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _UpperCamelCase ( ): '''simple docstring''' from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase__ = """__test_patch_submodule_successive_join__""" UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__""" UpperCAmelCase__ = """__test_patch_submodule_successive_rename__""" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _UpperCamelCase ( ): '''simple docstring''' UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__""" with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ): pass
346
1
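The conversion script above splits fused qkv tensors by slicing along dim 0. A minimal torch sketch of that pattern (hypothetical helper name; the fused projection has shape (3 * hidden, hidden)):

import torch

def split_qkv(qkv_weight: torch.Tensor, qkv_bias: torch.Tensor, hidden: int):
    # Query, key and value are stacked along dim 0 of the fused projection.
    q_w, k_w, v_w = qkv_weight[:hidden, :], qkv_weight[hidden : 2 * hidden, :], qkv_weight[-hidden:, :]
    q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)

# Example with a toy hidden size of 4 (fused shape is 12 x 4).
(q, qb), (k, kb), (v, vb) = split_qkv(torch.randn(12, 4), torch.randn(12), 4)
assert q.shape == k.shape == v.shape == (4, 4)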
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __snake_case ( _lowerCAmelCase : bytes , _lowerCAmelCase : int ) -> List[str]: A_ : int = f"{sampling_rate}" A_ : List[Any] = """1""" A_ : Tuple = """f32le""" A_ : Union[str, Any] = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(_lowerCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: A_ : List[Any] = ffmpeg_process.communicate(_lowerCAmelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error A_ : Union[str, Any] = output_stream[0] A_ : Union[str, Any] = np.frombuffer(_lowerCAmelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : str = "f32le" , ) -> List[Any]: A_ : Optional[Any] = f"{sampling_rate}" A_ : List[str] = """1""" if format_for_conversion == "s16le": A_ : int = 2 elif format_for_conversion == "f32le": A_ : List[Any] = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" ) A_ : Union[str, Any] = platform.system() if system == "Linux": A_ : List[str] = """alsa""" A_ : List[str] = """default""" elif system == "Darwin": A_ : Optional[Any] = """avfoundation""" A_ : Union[str, Any] = """:0""" elif system == "Windows": A_ : Tuple = """dshow""" A_ : Union[str, Any] = """default""" A_ : Optional[int] = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] A_ : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample A_ : Any = _ffmpeg_stream(_lowerCAmelCase , _lowerCAmelCase ) for item in iterator: yield item def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Union[Tuple[float, float], float]] = None , _lowerCAmelCase : str = "f32le" , ) -> Union[str, Any]: if stream_chunk_s is not None: A_ : List[Any] = stream_chunk_s else: A_ : Dict = chunk_length_s A_ : Optional[int] = ffmpeg_microphone(_lowerCAmelCase , _lowerCAmelCase , format_for_conversion=_lowerCAmelCase ) if format_for_conversion == "s16le": A_ : List[Any] = np.intaa A_ : Optional[Any] = 2 elif format_for_conversion == "f32le": A_ : Optional[int] = np.floataa A_ : str = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`" ) if stride_length_s is None: A_ : List[Any] = chunk_length_s / 6 A_ : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(_lowerCAmelCase , (int, float) ): A_ : Optional[Any] = [stride_length_s, stride_length_s] A_ : List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample A_ : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample A_ : Tuple = datetime.datetime.now() A_ : List[str] = datetime.timedelta(seconds=_lowerCAmelCase ) for item in chunk_bytes_iter(_lowerCAmelCase , _lowerCAmelCase , stride=(stride_left, stride_right) , stream=_lowerCAmelCase ): # Put everything back in numpy scale A_ : List[Any] = np.frombuffer(item["raw"] , dtype=_lowerCAmelCase ) A_ : Optional[int] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) A_ : Dict = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple[int, int] , _lowerCAmelCase : bool = False ) -> Optional[Any]: A_ : Optional[int] = B"""""" A_ : Tuple = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" ) A_ : Tuple = 0 for raw in iterator: acc += raw if stream and len(_lowerCAmelCase ) < chunk_len: A_ : Tuple = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(_lowerCAmelCase ) >= chunk_len: # We are flushing the accumulator A_ : Union[str, Any] = (_stride_left, stride_right) A_ : str = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: A_ : Any = False yield item A_ : Any = stride_left A_ : Optional[int] = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(_lowerCAmelCase ) > stride_left: A_ : List[Any] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: A_ : Optional[Any] = False yield item def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ) -> List[str]: A_ : Union[str, Any] = 2**24 # 16Mo try: with subprocess.Popen(_lowerCAmelCase , stdout=subprocess.PIPE , bufsize=_lowerCAmelCase ) as ffmpeg_process: while True: A_ : Optional[Any] = ffmpeg_process.stdout.read(_lowerCAmelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
359
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _lowerCAmelCase : str = logging.get_logger(__name__) @dataclass class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :List[str]=False , snake_case :Optional[Any]=False , snake_case :Union[str, Any]=6.0 , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=False , snake_case :str=False , snake_case :Optional[Any]=None , snake_case :int="fp4" , snake_case :int=False , **snake_case :Optional[Any] , ): '''simple docstring''' A_ : int = load_in_abit A_ : Union[str, Any] = load_in_abit A_ : str = llm_inta_threshold A_ : str = llm_inta_skip_modules A_ : List[Any] = llm_inta_enable_fpaa_cpu_offload A_ : Optional[int] = llm_inta_has_fpaa_weight A_ : Optional[int] = bnb_abit_quant_type A_ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: A_ : List[Any] = torch.floataa elif isinstance(snake_case , snake_case ): A_ : Any = getattr(snake_case , snake_case ) elif isinstance(snake_case , torch.dtype ): A_ : Union[str, Any] = bnb_abit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" ) self.post_init() def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' if not isinstance(self.llm_inta_threshold , snake_case ): raise ValueError("llm_int8_threshold must be a float" ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , snake_case ): raise ValueError("llm_int8_skip_modules must be a list of strings" ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , snake_case ): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" ) if not isinstance(self.llm_inta_has_fpaa_weight , snake_case ): raise ValueError("llm_int8_has_fp16_weight must be a boolean" ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" ) if not isinstance(self.bnb_abit_quant_type , snake_case ): raise ValueError("bnb_4bit_quant_type must be a string" ) if not isinstance(self.bnb_abit_use_double_quant , snake_case ): raise ValueError("bnb_4bit_use_double_quant must be a boolean" ) if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' return self.load_in_abit or self.load_in_abit def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif 
self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def SCREAMING_SNAKE_CASE ( cls :List[str] , snake_case :Dict , snake_case :str , **snake_case :Dict ): '''simple docstring''' A_ : str = cls(**snake_case ) A_ : Any = [] for key, value in kwargs.items(): if hasattr(snake_case , snake_case ): setattr(snake_case , snake_case , snake_case ) to_remove.append(snake_case ) for key in to_remove: kwargs.pop(snake_case , snake_case ) if return_unused_kwargs: return config, kwargs else: return config def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Union[str, os.PathLike] ): '''simple docstring''' with open(snake_case , "w" , encoding="utf-8" ) as writer: A_ : List[Any] = self.to_dict() A_ : int = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n" writer.write(snake_case ) def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' A_ : List[str] = copy.deepcopy(self.__dict__ ) A_ : Optional[int] = str(output["bnb_4bit_compute_dtype"] ).split("." )[1] return output def __repr__( self :List[str] ): '''simple docstring''' return f"{self.__class__.__name__} {self.to_json_string()}" def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :bool = True ): '''simple docstring''' if use_diff is True: A_ : List[str] = self.to_diff_dict() else: A_ : int = self.to_dict() return json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n" def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : List[Any] = self.to_dict() # get the default config dict A_ : Optional[Any] = BitsAndBytesConfig().to_dict() A_ : List[Any] = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: A_ : int = value return serializable_config_dict
70
0
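The audio utilities above window a raw byte stream into overlapping chunks. A pure-Python sketch of the accumulator logic (byte strides only; the real helper also flags partial chunks when streaming):

def chunk_bytes_iter(iterator, chunk_len: int, stride: tuple):
    # Emit windows of chunk_len bytes; after the first chunk, each window keeps
    # stride_left bytes of left context, and all keep stride_right on the right.
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError("Stride needs to be strictly smaller than chunk_len")
    acc = b""
    _stride_left = 0
    for raw in iterator:
        acc += raw
        while len(acc) >= chunk_len:
            yield {"raw": acc[:chunk_len], "stride": (_stride_left, stride_right)}
            _stride_left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:
        yield {"raw": acc, "stride": (_stride_left, 0)}

# Example: 10 bytes in chunks of 4 with a 1-byte stride on each side -> 5 chunks.
chunks = list(chunk_bytes_iter(iter([b"0123456789"]), 4, (1, 1)))
assert chunks[0]["raw"] == b"0123" and chunks[1]["raw"] == b"2345"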
'''simple docstring'''
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join([separator.join([char.upper() for char in sub_str]) for sub_str in string_split])
        else:
            res_str = "".join([separator.join([char.lower() for char in sub_str]) for sub_str in string_split])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
181
import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets _snake_case : Tuple = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' _snake_case : str = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' _snake_case : List[str] = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowercase ( self : str ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , ) def lowercase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> List[Any]: __lowerCAmelCase = 0.0 for i, j in zip(lowerCAmelCase_ , lowerCAmelCase_ ): n_correct += 1.0 if math_equivalence.is_equiv(lowerCAmelCase_ , lowerCAmelCase_ ) else 0.0 __lowerCAmelCase = n_correct / len(lowerCAmelCase_ ) return { "accuracy": accuracy, }
284
0
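Assuming the corrected names in the case-conversion listing above, a quick usage check:

# Each helper splits the input on separators/whitespace and rejoins with the requested casing.
assert to_pascal_case("hello world example") == "HelloWorldExample"
assert to_camel_case("hello world example") == "helloWorldExample"
assert to_snake_case("hello world example", upper=True) == "HELLO_WORLD_EXAMPLE"
assert to_kebab_case("hello world example", upper=False) == "hello-world-example"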
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def A ( snake_case :List[Any] , snake_case :Dict=1 ) -> Optional[int]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def A ( snake_case :Dict , snake_case :int=0 ) -> Optional[int]: __UpperCamelCase = [] for old_item in old_list: __UpperCamelCase = old_item.replace('in_layers.0' , 'norm1' ) __UpperCamelCase = new_item.replace('in_layers.2' , 'conv1' ) __UpperCamelCase = new_item.replace('out_layers.0' , 'norm2' ) __UpperCamelCase = new_item.replace('out_layers.3' , 'conv2' ) __UpperCamelCase = new_item.replace('emb_layers.1' , 'time_emb_proj' ) __UpperCamelCase = new_item.replace('skip_connection' , 'conv_shortcut' ) __UpperCamelCase = shave_segments(snake_case , n_shave_prefix_segments=snake_case ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case :Optional[Any] , snake_case :Tuple=0 ) -> Tuple: __UpperCamelCase = [] for old_item in old_list: __UpperCamelCase = old_item __UpperCamelCase = new_item.replace('norm.weight' , 'group_norm.weight' ) __UpperCamelCase = new_item.replace('norm.bias' , 'group_norm.bias' ) __UpperCamelCase = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) __UpperCamelCase = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) __UpperCamelCase = shave_segments(snake_case , n_shave_prefix_segments=snake_case ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case :int , snake_case :List[str] , snake_case :List[str] , snake_case :Any=None , snake_case :Optional[int]=None , snake_case :Union[str, Any]=None ) -> Optional[int]: assert isinstance(snake_case , snake_case ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __UpperCamelCase = old_checkpoint[path] __UpperCamelCase = old_tensor.shape[0] // 3 __UpperCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __UpperCamelCase = old_tensor.shape[0] // config['num_head_channels'] // 3 __UpperCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = old_tensor.split(channels // num_heads , dim=1 ) __UpperCamelCase = query.reshape(snake_case ) __UpperCamelCase = key.reshape(snake_case ) __UpperCamelCase = value.reshape(snake_case ) for path in paths: __UpperCamelCase = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __UpperCamelCase = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) __UpperCamelCase = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) __UpperCamelCase = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: __UpperCamelCase = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __UpperCamelCase = old_checkpoint[path['old']][:, :, 0] else: __UpperCamelCase = old_checkpoint[path['old']] def A ( snake_case :Optional[Any] , snake_case :Dict ) -> Optional[Any]: __UpperCamelCase = {} __UpperCamelCase = checkpoint['time_embed.0.weight'] __UpperCamelCase = checkpoint['time_embed.0.bias'] __UpperCamelCase = checkpoint['time_embed.2.weight'] __UpperCamelCase = checkpoint['time_embed.2.bias'] __UpperCamelCase = checkpoint['input_blocks.0.0.weight'] __UpperCamelCase = checkpoint['input_blocks.0.0.bias'] __UpperCamelCase = checkpoint['out.0.weight'] __UpperCamelCase = checkpoint['out.0.bias'] __UpperCamelCase = checkpoint['out.2.weight'] __UpperCamelCase = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only __UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the middle blocks only __UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the output blocks only __UpperCamelCase = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(snake_case ) } for i in range(1 , snake_case ): __UpperCamelCase = (i - 1) // (config['num_res_blocks'] + 1) __UpperCamelCase = (i - 1) % (config['num_res_blocks'] + 1) __UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] __UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: __UpperCamelCase = checkpoint[ f'input_blocks.{i}.0.op.weight' ] __UpperCamelCase = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} __UpperCamelCase = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path, resnet_op] , config=snake_case ) if len(snake_case ): __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } __UpperCamelCase = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=snake_case , config=snake_case , ) __UpperCamelCase = middle_blocks[0] __UpperCamelCase = middle_blocks[1] __UpperCamelCase = middle_blocks[2] __UpperCamelCase = renew_resnet_paths(snake_case ) assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case ) __UpperCamelCase = renew_resnet_paths(snake_case ) assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case ) __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , attention_paths_to_split=snake_case , config=snake_case ) for i in range(snake_case ): __UpperCamelCase = i // (config['num_res_blocks'] + 1) __UpperCamelCase = i % (config['num_res_blocks'] + 1) __UpperCamelCase = [shave_segments(snake_case , 2 ) for name in output_blocks[i]] __UpperCamelCase = {} for layer in output_block_layers: __UpperCamelCase , __UpperCamelCase = layer.split('.' 
)[0], shave_segments(snake_case , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case ) else: __UpperCamelCase = [layer_name] if len(snake_case ) > 1: __UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] __UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __UpperCamelCase = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) __UpperCamelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] __UpperCamelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(snake_case ) == 2: __UpperCamelCase = [] if len(snake_case ): __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } __UpperCamelCase = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=snake_case , ) else: __UpperCamelCase = renew_resnet_paths(snake_case , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __UpperCamelCase = '.'.join(['output_blocks', str(snake_case ), path['old']] ) __UpperCamelCase = '.'.join(['up_blocks', str(snake_case ), 'resnets', str(snake_case ), path['new']] ) __UpperCamelCase = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase : Dict = parser.parse_args() UpperCamelCase : Optional[Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCamelCase : int = json.loads(f.read()) UpperCamelCase : Dict = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCamelCase : Optional[Any] = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCamelCase : List[Any] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase : Optional[int] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase : Dict = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
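# A minimal sketch of the key-renaming idea the converter above applies, using an
# assumed num_res_blocks value; the real script drives this through renew_resnet_paths
# and assign_to_checkpoint rather than a plain str.replace.
num_res_blocks = 2
i = 3  # index of an "input_blocks.{i}" group in the old checkpoint
block_id = (i - 1) // (num_res_blocks + 1)
layer_in_block_id = (i - 1) % (num_res_blocks + 1)
old_key = f"input_blocks.{i}.0.conv1.weight"
new_key = old_key.replace(f"input_blocks.{i}.0", f"down_blocks.{block_id}.resnets.{layer_in_block_id}")
assert new_key == "down_blocks.0.resnets.2.conv1.weight"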
263
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = field __UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths} __UpperCamelCase = Json( cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , ) def UpperCAmelCase ( self ): '''simple docstring''' if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F'num_proc {num_proc} must be an integer > 0.' ) __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __UpperCamelCase = num_proc __UpperCamelCase = 'utf-8' __UpperCamelCase = to_json_kwargs def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.to_json_kwargs.pop('path_or_buf' , __UpperCAmelCase ) __UpperCamelCase = self.to_json_kwargs.pop('orient' , 'records' ) __UpperCamelCase = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False ) __UpperCamelCase = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True ) __UpperCamelCase = self.to_json_kwargs.pop('compression' , __UpperCAmelCase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F'`datasets` currently does not support {compression} compression' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , 'wb' , compression=__UpperCAmelCase ) as buffer: __UpperCamelCase = self._write(file_obj=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F'The compression parameter is not supported when writing to a buffer, but compression={compression}' ' was passed. Please provide a local path instead.' 
) __UpperCamelCase = self._write( file_obj=self.path_or_buf , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs ) return written def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args __UpperCamelCase = query_table( table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) __UpperCamelCase = batch.to_pandas().to_json( path_or_buf=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **__UpperCAmelCase ) if not json_str.endswith('\n' ): json_str += "\n" return json_str.encode(self.encoding ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ): __UpperCamelCase = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__UpperCAmelCase ) else: __UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ): written += file_obj.write(__UpperCAmelCase ) return written
263
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def snake_case_ (_a : Any ): return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def snake_case_ (_a : List[Any] ): UpperCAmelCase = create_tensor(_a ) UpperCAmelCase = gather(_a ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def snake_case_ (_a : str ): UpperCAmelCase = [state.process_index] UpperCAmelCase = gather_object(_a ) assert len(_a ) == state.num_processes, F"{gathered_obj}, {len(_a )} != {state.num_processes}" assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}" def snake_case_ (_a : Optional[int] ): UpperCAmelCase = create_tensor(_a ) UpperCAmelCase = broadcast(_a ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def snake_case_ (_a : Optional[Any] ): # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: UpperCAmelCase = torch.arange(state.num_processes + 1 ).to(state.device ) else: UpperCAmelCase = torch.arange(state.num_processes ).to(state.device ) UpperCAmelCase = pad_across_processes(_a ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def snake_case_ (_a : Tuple ): # For now runs on only two processes if state.num_processes != 2: return UpperCAmelCase = create_tensor(_a ) UpperCAmelCase = reduce(_a , '''sum''' ) UpperCAmelCase = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(_a , _a ), F"{reduced_tensor} != {truth_tensor}" def snake_case_ (_a : Optional[Any] ): # For now runs on only two processes if state.num_processes != 2: return UpperCAmelCase = create_tensor(_a ) UpperCAmelCase = reduce(_a , '''mean''' ) UpperCAmelCase = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(_a , _a ), F"{reduced_tensor} != {truth_tensor}" def snake_case_ (_a : Any ): # For xla_spawn (TPUs) main() def snake_case_ (): UpperCAmelCase = PartialState() state.print(F"State: {state}" ) state.print('''testing gather''' ) test_gather(_a ) state.print('''testing gather_object''' ) test_gather_object(_a ) state.print('''testing broadcast''' ) test_broadcast(_a ) state.print('''testing pad_across_processes''' ) test_pad_across_processes(_a ) state.print('''testing reduce_sum''' ) test_reduce_sum(_a ) state.print('''testing reduce_mean''' ) test_reduce_mean(_a ) if __name__ == "__main__": main()
34
"""Utilities for dataset file names."""
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    names = _single_underscore_re.split(name)
    names = [_multiple_underscores_re.split(n) for n in names]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(names) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
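# Illustrative usage of the two casing helpers above (function names follow the
# upstream `datasets.naming` module; the sample dataset name is arbitrary):
assert camelcase_to_snakecase("SquadV2") == "squad_v2"
assert snakecase_to_camelcase("squad_v2") == "SquadV2"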
311
0
"""Util import."""
# flake8: noqa
# Lint as: python3

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
214
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class UpperCAmelCase ( a__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = None def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=0.9_9_9 , UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowercase__ : str = [] for i in range(UpperCAmelCase ): lowercase__ : int = i / num_diffusion_timesteps lowercase__ : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase ) / alpha_bar_fn(UpperCAmelCase ) , UpperCAmelCase ) ) return torch.tensor(UpperCAmelCase , dtype=torch.floataa ) class UpperCAmelCase ( a__ , a__ ): '''simple docstring''' @register_to_config def __init__( self , __lowerCAmelCase = 1000 , __lowerCAmelCase = "fixed_small_log" , __lowerCAmelCase = True , __lowerCAmelCase = 1.0 , __lowerCAmelCase = "epsilon" , __lowerCAmelCase = "squaredcos_cap_v2" , ) -> Optional[int]: if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowercase__ : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase ) lowercase__ : List[Any] = 1.0 - self.betas lowercase__ : int = torch.cumprod(self.alphas , dim=0 ) lowercase__ : str = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowercase__ : Optional[Any] = 1.0 # setable values lowercase__ : Optional[Any] = None lowercase__ : List[Any] = torch.from_numpy(np.arange(0 , __lowerCAmelCase )[::-1].copy() ) lowercase__ : Tuple = variance_type def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> torch.FloatTensor: return sample def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Optional[int]: lowercase__ : List[str] = num_inference_steps lowercase__ : List[str] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowercase__ : List[str] = (np.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowercase__ : str = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Tuple: if prev_timestep is None: lowercase__ : Any = t - 1 lowercase__ : Any = self.alphas_cumprod[t] lowercase__ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowercase__ : str = 1 - alpha_prod_t lowercase__ : int = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowercase__ : Tuple = self.betas[t] else: lowercase__ : Dict = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowercase__ : Any = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowercase__ : 
Union[str, Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowercase__ : int = torch.log(torch.clamp(__lowerCAmelCase , min=1E-20 ) ) lowercase__ : Dict = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowercase__ : Union[str, Any] = variance.log() lowercase__ : Optional[int] = beta.log() lowercase__ : Tuple = (predicted_variance + 1) / 2 lowercase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=None , __lowerCAmelCase = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]: lowercase__ : Tuple = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowercase__ , lowercase__ : str = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 ) else: lowercase__ : Dict = None # 1. compute alphas, betas if prev_timestep is None: lowercase__ : int = t - 1 lowercase__ : Optional[int] = self.alphas_cumprod[t] lowercase__ : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowercase__ : Optional[int] = 1 - alpha_prod_t lowercase__ : List[Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowercase__ : Optional[int] = self.betas[t] lowercase__ : Optional[Any] = self.alphas[t] else: lowercase__ : Any = 1 - alpha_prod_t / alpha_prod_t_prev lowercase__ : Optional[int] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowercase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowercase__ : Dict = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowercase__ : List[Any] = torch.clamp( __lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowercase__ : Tuple = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowercase__ : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowercase__ : List[Any] = 0 if t > 0: lowercase__ : Dict = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=__lowerCAmelCase , device=model_output.device ) lowercase__ : Union[str, Any] = self._get_variance( __lowerCAmelCase , predicted_variance=__lowerCAmelCase , prev_timestep=__lowerCAmelCase , ) if self.variance_type == "fixed_small_log": lowercase__ : List[Any] = variance elif self.variance_type == "learned_range": lowercase__ : int = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ''' for the UnCLIPScheduler.''' ) lowercase__ : List[str] = variance * variance_noise lowercase__ : Union[str, Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowercase__ : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowercase__ : str = timesteps.to(original_samples.device ) lowercase__ : Union[str, Any] = alphas_cumprod[timesteps] ** 0.5 lowercase__ : List[str] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowercase__ : List[str] = sqrt_alpha_prod.unsqueeze(-1 ) lowercase__ : int = (1 - alphas_cumprod[timesteps]) ** 0.5 lowercase__ : List[Any] = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowercase__ : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowercase__ : Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
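# Quick sanity sketch for the cosine beta schedule defined at the top of this file.
# Upstream diffusers names the helper betas_for_alpha_bar (the scrambled source lost
# the name); every beta is clipped at max_beta=0.999 and the length matches the request.
betas = betas_for_alpha_bar(10)
assert betas.shape == (10,)
assert float(betas.max()) <= 0.999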
214
1
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list``, sorted ascending."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # the maximum count in the input list
    # gather every distinct value whose count equals the maximum
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
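# Illustrative checks for the repaired mode() helper above (sample lists are arbitrary):
assert mode([2, 3, 4, 5, 3, 4, 2]) == [2, 3, 4]
assert mode([3, 4, 5, 3, 4, 2, 5]) == [3, 4, 5]
assert mode([]) == []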
348
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class UpperCAmelCase_ ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Dict = tempfile.mkdtemp() __lowercase : Any = BlipImageProcessor() __lowercase : Optional[int] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) __lowercase : str = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) __lowercase : str = InstructBlipProcessor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) processor.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).tokenizer def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).qformer_tokenizer def _lowerCamelCase ( self ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ) -> Any: __lowercase : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase : Any = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCamelCase ( self ) -> str: __lowercase : Any = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) __lowercase : List[str] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowercase : Dict = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) __lowercase : int = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Any: __lowercase : Any = self.get_image_processor() __lowercase : str = self.get_tokenizer() __lowercase : Any = self.get_qformer_tokenizer() __lowercase : List[str] = InstructBlipProcessor( tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ ) __lowercase : int = self.prepare_image_inputs() __lowercase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' ) __lowercase : Tuple = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self ) -> str: __lowercase : str = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() __lowercase : 
Optional[Any] = self.get_qformer_tokenizer() __lowercase : List[str] = InstructBlipProcessor( tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ ) __lowercase : Dict = '''lower newer''' __lowercase : int = processor(text=UpperCamelCase_ ) __lowercase : List[str] = tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ ) __lowercase : Union[str, Any] = qformer_tokenizer(UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] ) def _lowerCamelCase ( self ) -> List[str]: __lowercase : Union[str, Any] = self.get_image_processor() __lowercase : Union[str, Any] = self.get_tokenizer() __lowercase : Optional[int] = self.get_qformer_tokenizer() __lowercase : List[str] = InstructBlipProcessor( tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ ) __lowercase : Optional[int] = '''lower newer''' __lowercase : Any = self.prepare_image_inputs() __lowercase : List[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def _lowerCamelCase ( self ) -> Dict: __lowercase : Any = self.get_image_processor() __lowercase : List[str] = self.get_tokenizer() __lowercase : Any = self.get_qformer_tokenizer() __lowercase : Tuple = InstructBlipProcessor( tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ ) __lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase : List[str] = processor.batch_decode(UpperCamelCase_ ) __lowercase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: __lowercase : List[str] = self.get_image_processor() __lowercase : List[str] = self.get_tokenizer() __lowercase : List[Any] = self.get_qformer_tokenizer() __lowercase : Optional[Any] = InstructBlipProcessor( tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ , qformer_tokenizer=UpperCamelCase_ ) __lowercase : Any = '''lower newer''' __lowercase : Union[str, Any] = self.prepare_image_inputs() __lowercase : Union[str, Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
249
0
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def lowercase__ ( self ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ort.SessionOptions() lowerCamelCase_ =False return options def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowerCamelCase_ =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''', revision='''onnx''', safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ ='''A red cat sitting on a park bench''' lowerCamelCase_ =np.random.RandomState(0 ) lowerCamelCase_ =pipe( prompt=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase, output_type='''np''', ) lowerCamelCase_ =output.images lowerCamelCase_ =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowerCamelCase_ =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''', subfolder='''scheduler''', revision='''onnx''' ) lowerCamelCase_ =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''', revision='''onnx''', scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ ='''A red cat sitting on a park bench''' lowerCamelCase_ =np.random.RandomState(0 ) lowerCamelCase_ =pipe( prompt=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase, output_type='''np''', ) lowerCamelCase_ =output.images lowerCamelCase_ =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 
0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
366
"""Dice-duel problem: probability that Peter (nine 4-sided dice) beats
Colin (six 6-sided dice)."""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many rolls of the dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Peter's win probability, rounded to seven decimal places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
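# Sanity sketch for total_frequency_distribution above: with two 4-sided dice,
# totals 2..8 occur with frequencies 1, 2, 3, 4, 3, 2, 1 (values chosen for illustration).
dist = total_frequency_distribution(sides_number=4, dice_number=2)
assert dist[2:] == [1, 2, 3, 4, 3, 2, 1]
assert sum(dist) == 4**2  # every one of the 16 equally likely rolls is counted once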
6
0
def join(separator: str, separated: list) -> str:
    """Concatenate the strings in ``separated``, placing ``separator`` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
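# Illustrative usage of the repaired join() helper above:
assert join("", ["a", "b", "c", "d"]) == "abcd"
assert join("#", ["a", "b", "c", "d"]) == "a#b#c#d"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"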
20
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] ) @pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] ) @pytest.mark.parametrize("revision" , [None, "v2"] ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]: __lowerCAmelCase: Optional[Any] = hf_hub_url(repo_id=__SCREAMING_SNAKE_CASE , path=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE ) assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(__SCREAMING_SNAKE_CASE )}"
217
0
from ..utils import DummyObject, requires_backends


# placeholder class name: the original name is not recoverable from this snippet;
# the structure is the standard dummy-object pattern that raises a helpful error
# until both torch and scipy are installed
class TorchAndScipyDummyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
363
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph represented as an adjacency list."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with ``vertices_number`` vertices."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
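# Illustrative usage of the graph generators above; seeding makes the draw reproducible.
random.seed(1)
graph = random_graph(4, 0.5)
assert set(graph) == {0, 1, 2, 3}
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}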
151
0
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class A ( A_ ): def _A (self ): __lowercase= tempfile.mkdtemp() __lowercase= 8 # DPR tok __lowercase= [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __lowercase= os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __lowercase= [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) __lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __lowercase= {'unk_token': '<unk>'} __lowercase= os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) __lowercase= os.path.join(lowerCAmelCase , BART_VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= os.path.join(lowerCAmelCase , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase ) ) def _A (self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _A (self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def _A (self ): shutil.rmtree(self.tmpdirname ) @require_tokenizers def _A (self ): __lowercase= os.path.join(self.tmpdirname , 'rag_tokenizer' ) __lowercase= RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __lowercase= RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(lowerCAmelCase ) rag_tokenizer.save_pretrained(lowerCAmelCase ) __lowercase= RagTokenizer.from_pretrained(lowerCAmelCase , config=lowerCAmelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCAmelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , lowerCAmelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def _A (self ): __lowercase= 
RagTokenizer.from_pretrained('facebook/rag-token-nq' ) __lowercase= [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __lowercase= tokenizer(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow def _A (self ): __lowercase= RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) __lowercase= [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __lowercase= tokenizer(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase )
295
def split(string: str, separator: str = " ") -> list:
    """Split ``string`` on ``separator`` without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
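# Illustrative checks for the repaired split() helper above:
assert split("apple#banana#cherry#orange", separator="#") == ["apple", "banana", "cherry", "orange"]
assert split("Hello there") == ["Hello", "there"]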
295
1
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: bytes , __lowerCamelCase: int ): '''simple docstring''' lowercase_ = F'{sampling_rate}' lowercase_ = "1" lowercase_ = "f32le" lowercase_ = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(__lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: lowercase_ = ffmpeg_process.communicate(__lowerCamelCase ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error lowercase_ = output_stream[0] lowercase_ = np.frombuffer(__lowerCamelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: str = "f32le" , ): '''simple docstring''' lowercase_ = F'{sampling_rate}' lowercase_ = "1" if format_for_conversion == "s16le": lowercase_ = 2 elif format_for_conversion == "f32le": lowercase_ = 4 else: raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' ) lowercase_ = platform.system() if system == "Linux": lowercase_ = "alsa" lowercase_ = "default" elif system == "Darwin": lowercase_ = "avfoundation" lowercase_ = ":0" elif system == "Windows": lowercase_ = "dshow" lowercase_ = "default" lowercase_ = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] lowercase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowercase_ = _ffmpeg_stream(__lowerCamelCase , __lowerCamelCase ) for item in iterator: yield item def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: float , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[Union[Tuple[float, float], float]] = None , __lowerCamelCase: str = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: lowercase_ = stream_chunk_s else: lowercase_ = chunk_length_s lowercase_ = ffmpeg_microphone(__lowerCamelCase , __lowerCamelCase , format_for_conversion=__lowerCamelCase ) if format_for_conversion == "s16le": lowercase_ = np.intaa lowercase_ = 2 elif format_for_conversion == "f32le": lowercase_ = np.floataa lowercase_ = 4 else: raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' ) if stride_length_s is None: lowercase_ = chunk_length_s / 6 lowercase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(__lowerCamelCase , (int, float) ): lowercase_ = [stride_length_s, stride_length_s] lowercase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowercase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowercase_ = datetime.datetime.now() lowercase_ = datetime.timedelta(seconds=__lowerCamelCase ) for item in chunk_bytes_iter(__lowerCamelCase , __lowerCamelCase , stride=(stride_left, stride_right) , stream=__lowerCamelCase ): # Put everything back in numpy scale lowercase_ = np.frombuffer(item["raw"] , dtype=__lowerCamelCase ) lowercase_ = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) lowercase_ = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: Tuple[int, int] , __lowerCamelCase: bool = False ): '''simple docstring''' lowercase_ = B"" lowercase_ , lowercase_ = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' ) lowercase_ = 0 for raw in iterator: acc += raw if stream and len(__lowerCamelCase ) < chunk_len: lowercase_ = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(__lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowercase_ = (_stride_left, stride_right) lowercase_ = {"raw": acc[:chunk_len], "stride": stride} if stream: lowercase_ = False yield item lowercase_ = stride_left lowercase_ = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(__lowerCamelCase ) > stride_left: lowercase_ = {"raw": acc, "stride": (_stride_left, 0)} if stream: lowercase_ = False yield item def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: int ): '''simple docstring''' lowercase_ = 2**24 # 16Mo try: with subprocess.Popen(__lowerCamelCase , stdout=subprocess.PIPE , bufsize=__lowerCamelCase ) as ffmpeg_process: while True: lowercase_ = ffmpeg_process.stdout.read(__lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
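# Minimal sketch of the striding behaviour of the chunking helper above (upstream
# transformers names it chunk_bytes_iter; the scrambled source lost the name),
# driven by a toy byte stream instead of a live ffmpeg process:
chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(1, 1)))
assert chunks[0] == {"raw": b"abcd", "stride": (0, 1)}  # first chunk has no left stride
assert chunks[1] == {"raw": b"cdef", "stride": (1, 1)}  # later chunks overlap both sides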
297
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MraForMaskedLM""", """MraForMultipleChoice""", """MraForQuestionAnswering""", """MraForSequenceClassification""", """MraForTokenClassification""", """MraLayer""", """MraModel""", """MraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
297
1
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
62
def solution(limit: int = 1_000_000) -> int:
    """Count the values of n below ``limit`` with exactly ten solutions to
    x**2 - y**2 - z**2 == n, where x, y, z are in arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
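# A sketch of the algebra behind the loop above: writing the progression as
# x = a + d, y = a, z = a - d gives x**2 - y**2 - z**2 == a * (4*d - a) == n,
# so d = (a + n / a) / 4 must be a whole number, hence the "% 4" guard.
a, d = 20, 7  # arbitrary values with a > d and a < 4 * d
n = a * (4 * d - a)
assert (a + d) ** 2 - a**2 - (a - d) ** 2 == n == 160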
116
0
import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : str = MvpTokenizer _A : str = MvpTokenizerFast _A : Union[str, Any] = True _A : Optional[int] = filter_roberta_detectors def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __lowercase : Optional[Any] = dict(zip(__a , range(len(__a ) ) ) ) __lowercase : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __lowercase : List[str] = {"""unk_token""": """<unk>"""} __lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__a ) ) def lowerCAmelCase ( self : Dict , **__a : Tuple ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> List[str]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : Tuple , __a : str ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" ) @cached_property def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" ) @require_torch def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __lowercase : str = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : List[Any] = tokenizer(__a , max_length=len(__a ) , padding=__a , return_tensors="""pt""" ) self.assertIsInstance(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __lowercase : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__a , __a ) # Test that special tokens are reset @require_torch def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : str = tokenizer(__a , padding=__a , return_tensors="""pt""" ) # 
check if input_ids are returned and no labels self.assertIn("""input_ids""" , __a ) self.assertIn("""attention_mask""" , __a ) self.assertNotIn("""labels""" , __a ) self.assertNotIn("""decoder_attention_mask""" , __a ) @require_torch def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : Tuple = tokenizer(text_target=__a , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : Tuple = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__a , truncation=__a , return_tensors="""pt""" ) self.assertIsInstance(__a , __a ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" __lowercase : int = ["""A long paragraph for summarization."""] __lowercase : List[str] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : List[str] = tokenizer(__a , text_target=__a , return_tensors="""pt""" ) __lowercase : int = inputs["""input_ids"""] __lowercase : List[str] = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): __lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __lowercase : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a ) __lowercase : str = """A, <mask> AllenNLP sentence.""" __lowercase : List[Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a ) __lowercase : Any = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __lowercase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __lowercase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( __a , ["""<s>""", """A""", 
""",""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
306
from ...processing_utils import ProcessorMixin class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''image_processor''', '''feature_extractor'''] _A : List[Any] = '''TvltImageProcessor''' _A : Optional[int] = '''TvltFeatureExtractor''' def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]: """simple docstring""" super().__init__(image_processor=__a , feature_extractor=__a ) __lowercase : Union[str, Any] = image_processor __lowercase : Tuple = feature_extractor def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict: """simple docstring""" if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) __lowercase : Tuple = None if images is not None: __lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a ) if images_mixed is not None: __lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a ) if audio is not None: __lowercase : Optional[Any] = self.feature_extractor( __a , *__a , sampling_rate=__a , mask_audio=__a , **__a ) __lowercase : Tuple = {} if audio is not None: output_dict.update(__a ) if images is not None: output_dict.update(__a ) if images_mixed_dict is not None: output_dict.update(__a ) return output_dict @property def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase : int = self.image_processor.model_input_names __lowercase : Union[str, Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
306
1
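The TVLT-style processor in the row above exposes its combined `model_input_names` by concatenating the two sub-processors' name lists and deduplicating with `dict.fromkeys`. A minimal standalone sketch of that ordered-deduplication idiom; the two name lists here are illustrative placeholders, not the real TVLT input names:

# Ordered deduplication via dict.fromkeys, as in the processor's
# model_input_names property above; the two name lists are placeholders.
image_processor_input_names = ["pixel_values", "pixel_mask"]
feature_extractor_input_names = ["audio_values", "audio_mask", "pixel_mask"]

# dict.fromkeys keeps the first occurrence of each key and (since Python 3.7)
# preserves insertion order, so duplicates drop without reordering.
merged = list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
print(merged)  # ['pixel_values', 'pixel_mask', 'audio_values', 'audio_mask']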
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared

    # pytest discovers this hook by name, so the function cannot be renamed.
    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
36
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( __lowercase , unittest.TestCase): '''simple docstring''' _A = KandinskyVaaPriorPipeline _A = ['prompt'] _A = ['prompt', 'negative_prompt'] _A = [ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] _A = False @property def _lowerCamelCase ( self :Tuple ) -> Optional[Any]: return 3_2 @property def _lowerCamelCase ( self :List[str] ) -> List[str]: return 3_2 @property def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple: return self.time_input_dim @property def _lowerCamelCase ( self :Union[str, Any] ) -> Any: return self.time_input_dim * 4 @property def _lowerCamelCase ( self :Tuple ) -> str: return 1_0_0 @property def _lowerCamelCase ( self :Union[str, Any] ) -> Any: __UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def _lowerCamelCase ( self :str ) -> int: torch.manual_seed(0 ) __UpperCamelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(a ) @property def _lowerCamelCase ( self :int ) -> Optional[Any]: torch.manual_seed(0 ) __UpperCamelCase : Optional[int] = { "num_attention_heads": 2, "attention_head_dim": 1_2, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } __UpperCamelCase : List[Any] = PriorTransformer(**a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def _lowerCamelCase ( self :Dict ) -> Union[str, Any]: torch.manual_seed(0 ) __UpperCamelCase : Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , ) __UpperCamelCase : int = CLIPVisionModelWithProjection(a ) return model @property def _lowerCamelCase ( self :Dict ) -> Optional[Any]: __UpperCamelCase : List[str] = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , ) return image_processor def _lowerCamelCase ( self :str ) -> Optional[int]: __UpperCamelCase : str = self.dummy_prior __UpperCamelCase : int = self.dummy_image_encoder __UpperCamelCase : Tuple = self.dummy_text_encoder __UpperCamelCase : int = self.dummy_tokenizer __UpperCamelCase : Optional[Any] = self.dummy_image_processor __UpperCamelCase : int = UnCLIPScheduler( variance_type="fixed_small_log" , prediction_type="sample" , 
num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , ) __UpperCamelCase : List[Any] = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def _lowerCamelCase ( self :Optional[Any] , a :int , a :Union[str, Any]=0 ) -> Any: if str(a ).startswith("mps" ): __UpperCamelCase : int = torch.manual_seed(a ) else: __UpperCamelCase : List[Any] = torch.Generator(device=a ).manual_seed(a ) __UpperCamelCase : int = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def _lowerCamelCase ( self :List[Any] ) -> Dict: __UpperCamelCase : int = "cpu" __UpperCamelCase : List[str] = self.get_dummy_components() __UpperCamelCase : List[str] = self.pipeline_class(**a ) __UpperCamelCase : Tuple = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) __UpperCamelCase : int = pipe(**self.get_dummy_inputs(a ) ) __UpperCamelCase : int = output.image_embeds __UpperCamelCase : Optional[int] = pipe( **self.get_dummy_inputs(a ) , return_dict=a , )[0] __UpperCamelCase : Union[str, Any] = image[0, -1_0:] __UpperCamelCase : List[str] = image_from_tuple[0, -1_0:] assert image.shape == (1, 3_2) __UpperCamelCase : List[Any] = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _lowerCamelCase ( self :int ) -> Union[str, Any]: __UpperCamelCase : str = torch_device == "cpu" __UpperCamelCase : List[str] = True __UpperCamelCase : List[Any] = False self._test_inference_batch_single_identical( test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , ) @skip_mps def _lowerCamelCase ( self :Any ) -> int: __UpperCamelCase : Optional[Any] = torch_device == "cpu" __UpperCamelCase : Dict = False self._test_attention_slicing_forward_pass( test_max_difference=a , test_mean_pixel_difference=a , )
232
0
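The Kandinsky pipeline test's `get_dummy_inputs` above builds a seeded generator differently on MPS, where device-local `torch.Generator` objects are unsupported. A small sketch of that pattern in isolation; the function name is mine:

# Sketch of the seeded-generator helper in get_dummy_inputs above: MPS does
# not support device-local torch.Generator objects, so the test falls back to
# seeding the default CPU generator there. Function name is illustrative.
import torch


def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # torch.manual_seed seeds the global CPU generator and returns it.
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)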
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool _snake_case = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', '''Kashmiri Arabic''': 
'''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': '''urd_Arab''', '''Northern 
Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class _snake_case ( _lowercase ): lowerCamelCase__: Tuple = "facebook/nllb-200-distilled-600M" lowerCamelCase__: List[Any] = ( "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should " "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, " "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in " "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`." ) lowerCamelCase__: Any = "translator" lowerCamelCase__: List[Any] = AutoTokenizer lowerCamelCase__: Dict = AutoModelForSeqaSeqLM lowerCamelCase__: Optional[Any] = LANGUAGE_CODES lowerCamelCase__: List[str] = ["text", "text", "text"] lowerCamelCase__: Dict = ["text"] def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: List[str] ) -> str: if src_lang not in self.lang_to_code: raise ValueError(f'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(f'''{tgt_lang} is not a supported language.''' ) __UpperCAmelCase : Union[str, Any] = self.lang_to_code[src_lang] __UpperCAmelCase : Dict = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __lowerCamelCase , return_tensors="pt" , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: str ) -> str: return self.model.generate(**__lowerCamelCase ) def _lowerCamelCase ( self: Dict , __lowerCamelCase: str ) -> Optional[int]: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCamelCase )
342
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]: __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = num_stages __UpperCAmelCase : List[str] = hidden_sizes __UpperCAmelCase : Any = depths __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : Optional[Any] = hidden_act __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : List[str] = out_features __UpperCAmelCase : Tuple = out_indices __UpperCAmelCase : List[Any] = scope def _lowerCamelCase ( self: List[Any] ) -> Optional[int]: __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self: Tuple ) -> List[Any]: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], 
self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple: __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __UpperCAmelCase : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowerCamelCase ( self: int ) -> List[str]: __UpperCAmelCase : int = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : str = {"pixel_values": pixel_values} return config, inputs_dict def _lowerCamelCase ( self: List[Any] ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _snake_case ( _lowercase , _lowercase , unittest.TestCase ): lowerCamelCase__: Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase__: str = ( {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase__: Tuple = False lowerCamelCase__: int = False lowerCamelCase__: Dict = False lowerCamelCase__: int = False lowerCamelCase__: Any = False def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self ) __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCamelCase ( self: Dict ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self: List[Any] ) -> int: return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def _lowerCamelCase ( self: Any ) -> Any: pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def _lowerCamelCase ( self: str ) -> Optional[Any]: pass def _lowerCamelCase ( self: List[Any] ) -> int: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : Optional[Any] = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: Optional[int] ) -> Dict: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase : int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss loss.backward() def _lowerCamelCase ( self: List[str] ) -> Dict: __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : str = model_class(__lowerCamelCase ) __UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[Any] = [*signature.parameters.keys()] __UpperCAmelCase : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCamelCase ( self: str ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCamelCase ( self: Union[str, Any] ) -> Dict: def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ): __UpperCAmelCase : Any = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, 
self.model_tester.image_size // 4] , ) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Any = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def _lowerCamelCase ( self: Dict ) -> List[Any]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def _UpperCamelCase ( ) -> List[Any]: __UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self: Optional[int] ) -> Dict: return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def _lowerCamelCase ( self: List[Any] ) -> Tuple: __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase ) __UpperCAmelCase : List[str] = self.default_image_processor __UpperCAmelCase : Optional[Any] = prepare_img() __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): __UpperCAmelCase : str = model(**__lowerCamelCase ) # verify the logits __UpperCAmelCase : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
342
1
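The translation tool's encode step above validates both language names against its mapping before looking up the NLLB codes. A tiny self-contained sketch of that validate-and-resolve step, using a three-entry stand-in for the full `LANGUAGE_CODES` table:

# Sketch of the validate-and-resolve step in the tool's encode method, with a
# three-entry stand-in for the full LANGUAGE_CODES mapping above.
LANG_TO_CODE = {"English": "eng_Latn", "French": "fra_Latn", "Romanian": "ron_Latn"}


def resolve_lang_pair(src_lang: str, tgt_lang: str) -> tuple:
    if src_lang not in LANG_TO_CODE:
        raise ValueError(f"{src_lang} is not a supported language.")
    if tgt_lang not in LANG_TO_CODE:
        raise ValueError(f"{tgt_lang} is not a supported language.")
    return LANG_TO_CODE[src_lang], LANG_TO_CODE[tgt_lang]


print(resolve_lang_pair("English", "Romanian"))  # ('eng_Latn', 'ron_Latn')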
import pprint

import requests

API_ENDPOINT_URL = '''https://zenquotes.io/api'''


def quote_of_the_day() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/today').json()


def random_quotes() -> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + '/random').json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
295
from __future__ import annotations def _lowerCamelCase( lowercase__ , lowercase__ ) -> bool: '''simple docstring''' __lowercase= get_failure_array(lowercase__ ) # 2) Step through text searching for pattern __lowercase, __lowercase= 0, 0 # index into text, pattern while i < len(lowercase__ ): if pattern[j] == text[i]: if j == (len(lowercase__ ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: __lowercase= failure[j - 1] continue i += 1 return False def _lowerCamelCase( lowercase__ ) -> list[int]: '''simple docstring''' __lowercase= [0] __lowercase= 0 __lowercase= 1 while j < len(lowercase__ ): if pattern[i] == pattern[j]: i += 1 elif i > 0: __lowercase= failure[i - 1] continue j += 1 failure.append(lowercase__ ) return failure if __name__ == "__main__": # Test 1) lowerCAmelCase = '''abc1abc12''' lowerCAmelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' lowerCAmelCase = '''alskfjaldsk23adsfabcabc''' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) lowerCAmelCase = '''ABABX''' lowerCAmelCase = '''ABABZABABYABABX''' assert kmp(pattern, text) # Test 3) lowerCAmelCase = '''AAAB''' lowerCAmelCase = '''ABAAAAAB''' assert kmp(pattern, text) # Test 4) lowerCAmelCase = '''abcdabcy''' lowerCAmelCase = '''abcxabcdabxabcdabcdabcy''' assert kmp(pattern, text) # Test 5) lowerCAmelCase = '''aabaabaaa''' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
295
1
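The matcher above hinges on the KMP failure (prefix) array, where `failure[k]` is the length of the longest proper prefix of `pattern[:k + 1]` that is also its suffix. A cleaned-up sketch of the same construction with readable names, checked against the snippet's own `'aabaabaaa'` test vector:

# Cleaned-up sketch of get_failure_array above with readable names;
# failure[k] is the length of the longest proper prefix of pattern[:k + 1]
# that is also a suffix of it.
def build_failure(pattern: str) -> list:
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Fall back to the longest shorter border and retry this j.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


# Matches the snippet's own test vector:
assert build_failure("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]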
'''simple docstring'''

from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''])
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''])
@pytest.mark.parametrize('''revision''', [None, '''v2'''])
def test_hf_hub_url(repo_id, path, revision):
    '''simple docstring'''
    # pytest injects parametrized values by argument name, so the parameters
    # must be named repo_id, path and revision for the decorators to bind.
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
72
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a : Any = get_logger(__name__) a : Union[str, Any] = Path(__file__).parent / 'model_card_template.md' a : List[Any] = uuida().hex a : List[str] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES a : str = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES a : Optional[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __magic_name__ ( __UpperCAmelCase = None ) -> str: '''simple docstring''' snake_case_ = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F"; torch/{_torch_version}" if is_flax_available(): ua += F"; jax/{_jax_version}" ua += F"; flax/{_flax_version}" if is_onnx_available(): ua += F"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCAmelCase, __UpperCAmelCase ): ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() ) elif isinstance(__UpperCAmelCase, __UpperCAmelCase ): ua += "; " + user_agent return ua def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None, __UpperCAmelCase = None ) -> Optional[Any]: '''simple docstring''' if token is None: snake_case_ = HfFolder.get_token() if organization is None: snake_case_ = whoami(__UpperCAmelCase )['''name'''] return F"{username}/{model_id}" else: return F"{organization}/{model_id}" def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(__UpperCAmelCase, '''local_rank''' ) and args.local_rank not in [-1, 0]: return snake_case_ = args.hub_token if hasattr(__UpperCAmelCase, '''hub_token''' ) else None snake_case_ = get_full_repo_name(__UpperCAmelCase, token=__UpperCAmelCase ) snake_case_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=__UpperCAmelCase, model_name=__UpperCAmelCase, repo_name=__UpperCAmelCase, dataset_name=args.dataset_name if hasattr(__UpperCAmelCase, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, 
gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCAmelCase, '''gradient_accumulation_steps''' ) else None ), adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta1''' ) else None, adam_betaa=args.adam_betaa if hasattr(__UpperCAmelCase, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCAmelCase, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(__UpperCAmelCase, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(__UpperCAmelCase, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCAmelCase, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCAmelCase, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(__UpperCAmelCase, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(__UpperCAmelCase, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, ) snake_case_ = os.path.join(args.output_dir, '''README.md''' ) model_card.save(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> Optional[Any]: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash snake_case_ = str(Path(__UpperCAmelCase ).as_posix() ) snake_case_ = re.search(r'''snapshots/([^/]+)/''', __UpperCAmelCase ) if search is None: return None snake_case_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCAmelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. a : str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) a : Optional[Any] = os.path.join(hf_cache_home, 'diffusers') def __magic_name__ ( __UpperCAmelCase = None, __UpperCAmelCase = None ) -> None: '''simple docstring''' if new_cache_dir is None: snake_case_ = DIFFUSERS_CACHE if old_cache_dir is None: snake_case_ = old_diffusers_cache snake_case_ = Path(__UpperCAmelCase ).expanduser() snake_case_ = Path(__UpperCAmelCase ).expanduser() for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): snake_case_ = new_cache_dir / old_blob_path.relative_to(__UpperCAmelCase ) new_blob_path.parent.mkdir(parents=__UpperCAmelCase, exist_ok=__UpperCAmelCase ) os.replace(__UpperCAmelCase, __UpperCAmelCase ) try: os.symlink(__UpperCAmelCase, __UpperCAmelCase ) except OSError: logger.warning( '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). a : Tuple = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): a : Tuple = 0 else: with open(cache_version_file) as f: try: a : Optional[Any] = int(f.read()) except ValueError: a : List[str] = 0 if cache_version < 1: a : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. 
This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: a : str = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' 'the directory exists and can be written to.' ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = None ) -> str: '''simple docstring''' if variant is not None: snake_case_ = weights_name.split('''.''' ) snake_case_ = splits[:-1] + [variant] + splits[-1:] snake_case_ = '''.'''.join(__UpperCAmelCase ) return weights_name def __magic_name__ ( __UpperCAmelCase, *, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=None, ) -> int: '''simple docstring''' snake_case_ = str(__UpperCAmelCase ) if os.path.isfile(__UpperCAmelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCAmelCase ): if os.path.isfile(os.path.join(__UpperCAmelCase, __UpperCAmelCase ) ): # Load from a PyTorch checkpoint snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ): snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) return model_file else: raise EnvironmentError( F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse('''0.20.0''' ) ): try: snake_case_ = hf_hub_download( __UpperCAmelCase, filename=_add_variant(__UpperCAmelCase, __UpperCAmelCase ), cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, ) warnings.warn( F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", __UpperCAmelCase, ) return model_file except: # noqa: E722 warnings.warn( F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCAmelCase, __UpperCAmelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. 
\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__UpperCAmelCase, __UpperCAmelCase )}' so that the correct variant file can be added.", __UpperCAmelCase, ) try: # 2. Load model file as usual snake_case_ = hf_hub_download( __UpperCAmelCase, filename=__UpperCAmelCase, cache_dir=__UpperCAmelCase, force_download=__UpperCAmelCase, proxies=__UpperCAmelCase, resume_download=__UpperCAmelCase, local_files_only=__UpperCAmelCase, use_auth_token=__UpperCAmelCase, user_agent=__UpperCAmelCase, subfolder=__UpperCAmelCase, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ''' '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ''' '''login`.''' ) except RevisionNotFoundError: raise EnvironmentError( F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " '''this model name. Check the model page at ''' F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" F" directory containing a file named {weights_name} or" ''' \nCheckout your internet connection or see how to run the library in''' ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' ) except EnvironmentError: raise EnvironmentError( F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ''' F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " F"containing a file named {weights_name}" )
72
1
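The `_add_variant` helper above splices a variant tag in front of the file extension of a weights filename. A standalone sketch of that transformation; the function name is mine:

# Standalone sketch of _add_variant above: splice a variant tag in front of
# the file extension of a weights filename.
from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        # Rebuild as name.variant.ext so the extension stays last.
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


print(add_variant("diffusion_pytorch_model.bin", "fp16"))
# diffusion_pytorch_model.fp16.bin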
"""simple docstring""" from __future__ import annotations def snake_case__ ( __lowerCamelCase : list[int] ): """simple docstring""" return len(set(__lowerCamelCase ) ) == len(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
238
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _lowercase : Tuple = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = ['pixel_values'] def __init__( self : Optional[Any], lowerCamelCase : bool = True, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : int = 8, **lowerCamelCase : Tuple, )-> None: super().__init__(**lowerCamelCase ) lowerCamelCase__ : int =do_rescale lowerCamelCase__ : Dict =rescale_factor lowerCamelCase__ : Union[str, Any] =do_pad lowerCamelCase__ : Union[str, Any] =pad_size def snake_case ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : float, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int )-> np.ndarray: return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None )-> List[Any]: lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_image_size(lowerCamelCase ) lowerCamelCase__ : List[str] =(old_height // size + 1) * size - old_height lowerCamelCase__ : List[str] =(old_width // size + 1) * size - old_width return pad(lowerCamelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCamelCase ) def snake_case ( self : List[Any], lowerCamelCase : ImageInput, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[float] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], )-> Dict: lowerCamelCase__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : str =do_pad if do_pad is not None else self.do_pad lowerCamelCase__ : int =pad_size if pad_size is not None else self.pad_size lowerCamelCase__ : Optional[int] =make_list_of_images(lowerCamelCase ) if not valid_images(lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowerCamelCase__ : Tuple =[to_numpy_array(lowerCamelCase ) for image in images] if do_rescale: lowerCamelCase__ : Tuple =[self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images] if do_pad: lowerCamelCase__ : Tuple =[self.pad(lowerCamelCase, size=lowerCamelCase ) for image in images] lowerCamelCase__ : int =[to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images] lowerCamelCase__ : Dict ={'''pixel_values''': images} return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
238
1
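The image processor's `pad` above grows each spatial dimension to the next multiple of `size` strictly greater than the current extent, so an already-aligned dimension still gains a full block. A worked sketch of just that arithmetic; the helper name is mine:

# Sketch of the pad-size arithmetic used in `pad` above: grow a dimension to
# the next multiple of `size` strictly greater than it.
def pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old


print(pad_amount(125, 8), pad_amount(130, 8))  # 3 6 -> padded to 128 x 136
print(pad_amount(128, 8))  # 8 -> an aligned edge is still padded by a block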
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
    """tokenization_cpmant""": ["""CpmAntTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # register the torch-only objects so _LazyModule can resolve them on demand
    _import_structure["""modeling_cpmant"""] = [
        """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CpmAntForCausalLM""",
        """CpmAntModel""",
        """CpmAntPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
48
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase = { """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = ["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
48
1
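Both `__init__` files in this row defer heavy imports through `_LazyModule`. A rough stand-in for that mechanism using PEP 562 module-level `__getattr__`; this is a sketch of the idea, not transformers' actual implementation, which wraps the module in a `_LazyModule` class:

# Rough stand-in for the _LazyModule mechanism: a package __init__.py can
# resolve exported names to submodule imports on first attribute access via
# PEP 562 module-level __getattr__. Meant to live inside a package, not run
# as a top-level script (the relative import requires a package context).
import importlib

_import_structure = {"tokenization_cpmant": ["CpmAntTokenizer"]}


def __getattr__(name):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            # Import the submodule lazily, then pull the requested symbol.
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")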
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' if "model" in orig_key: lowercase = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowercase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowercase = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowercase = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowercase = orig_key.split('''.''' )[0].split('''_''' )[-1] lowercase = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' ) if "mha.attn" in orig_key: lowercase = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowercase = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowercase = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowercase = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowercase = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowercase = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowercase = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowercase = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowercase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowercase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowercase = '''yoso.''' + orig_key return orig_key def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowercase = orig_state_dict.pop(lowerCAmelCase__ ) if ("pooler" in key) or ("sen_class" in key): continue else: lowercase = val lowercase = orig_state_dict['''cls.predictions.decoder.bias'''] lowercase = torch.arange(lowerCAmelCase__ ).expand((1, -1) ) + 2 return orig_state_dict def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowercase = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''model_state_dict'''] lowercase = YosoConfig.from_json_file(lowerCAmelCase__ ) lowercase = YosoForMaskedLM(lowerCAmelCase__ ) lowercase = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase__ ) print(model.load_state_dict(lowerCAmelCase__ ) ) model.eval() model.save_pretrained(lowerCAmelCase__ ) print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' ) if __name__ == "__main__": lowercase__ :Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowercase__ :Any = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
101
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ :Dict = logging.get_logger(__name__) lowercase__ :Optional[int] = { "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Any ='''altclip_text_model''' def __init__( self ,A__=2_5_0_0_0_2 ,A__=1_0_2_4 ,A__=2_4 ,A__=1_6 ,A__=4_0_9_6 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_4 ,A__=1 ,A__=0.02 ,A__=0.02 ,A__=1E-05 ,A__=1 ,A__=0 ,A__=2 ,A__="absolute" ,A__=True ,A__=7_6_8 ,**A__ ,): super().__init__(pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,**A__) lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_act lowercase = intermediate_size lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = type_vocab_size lowercase = initializer_range lowercase = initializer_factor lowercase = layer_norm_eps lowercase = position_embedding_type lowercase = use_cache lowercase = project_dim class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Optional[int] ='''altclip_vision_model''' def __init__( self ,A__=7_6_8 ,A__=3_0_7_2 ,A__=5_1_2 ,A__=1_2 ,A__=1_2 ,A__=3 ,A__=2_2_4 ,A__=3_2 ,A__="quick_gelu" ,A__=1E-5 ,A__=0.0 ,A__=0.02 ,A__=1.0 ,**A__ ,): super().__init__(**A__) lowercase = hidden_size lowercase = intermediate_size lowercase = projection_dim lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = num_channels lowercase = patch_size lowercase = image_size lowercase = initializer_range lowercase = initializer_factor lowercase = attention_dropout lowercase = layer_norm_eps lowercase = hidden_act @classmethod def A__ ( cls ,A__ ,**A__): cls._set_token_in_kwargs(A__) lowercase , lowercase = cls.get_config_dict(A__ ,**A__) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''') == "altclip": lowercase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.') return cls.from_dict(A__ ,**A__) class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Any ='''altclip''' lowercase_ : List[Any] =True def __init__( self ,A__=None ,A__=None ,A__=7_6_8 ,A__=2.6592 ,**A__): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). lowercase = kwargs.pop('''text_config_dict''' ,A__) lowercase = kwargs.pop('''vision_config_dict''' ,A__) super().__init__(**A__) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: lowercase = {} # This is the complete result when using `text_config_dict`. 
lowercase = AltCLIPTextConfig(**A__).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: lowercase = ( f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. ' f'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: lowercase = ( f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ' f'value `text_config["{key}"]` will be overriden.' ) logger.warning(A__) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: lowercase = {} # This is the complete result when using `vision_config_dict`. lowercase = AltCLIPVisionConfig(**A__).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: lowercase = { str(A__): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: lowercase = ( f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different ' f'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: lowercase = ( f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ' f'The value `vision_config["{key}"]` will be overriden.' ) logger.warning(A__) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict) if text_config is None: lowercase = {} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''') if vision_config is None: lowercase = {} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''') lowercase = AltCLIPTextConfig(**A__) lowercase = AltCLIPVisionConfig(**A__) lowercase = projection_dim lowercase = logit_scale_init_value lowercase = 1.0 @classmethod def A__ ( cls ,A__ ,A__ ,**A__): return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**A__) def A__ ( self): lowercase = copy.deepcopy(self.__dict__) lowercase = self.text_config.to_dict() lowercase = self.vision_config.to_dict() lowercase = self.__class__.model_type return output
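A small sketch of composing the three configs above from defaults, assuming the composing classmethod shown is the usual `from_text_vision_configs`:

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig()
vision_config = AltCLIPVisionConfig()

config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)  # 768 by default, per the signature above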
101
1
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCamelCase : '''simple docstring''' @staticmethod def __lowerCamelCase ( *_lowerCAmelCase : Dict , **_lowerCAmelCase : List[Any]): '''simple docstring''' pass @is_pipeline_test @require_vision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCamelCase ( self : int): '''simple docstring''' __lowercase =pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , ) __lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') __lowercase =image_classifier(_lowerCAmelCase , candidate_labels=['a', 'b', 'c']) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(_lowerCAmelCase) , [ [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}], [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}], ] , ) __lowercase =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2) self.assertEqual( nested_simplify(_lowerCAmelCase) , [ [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], ] , ) @require_tf def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __lowercase =pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf') __lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') __lowercase =image_classifier(_lowerCAmelCase , candidate_labels=['a', 'b', 'c']) self.assertEqual( nested_simplify(_lowerCAmelCase) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , ) __lowercase =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2) self.assertEqual( nested_simplify(_lowerCAmelCase) , [ [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], [ {'score': 
0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, {'score': 0.333, 'label': ANY(_lowerCAmelCase)}, ], ] , ) @slow @require_torch def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , ) # This is an image of 2 cats with remotes and no planes __lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') __lowercase =image_classifier(_lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote']) self.assertEqual( nested_simplify(_lowerCAmelCase) , [ {'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}, ] , ) __lowercase =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2) self.assertEqual( nested_simplify(_lowerCAmelCase) , [ [ {'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}, ], ] * 5 , ) @slow @require_tf def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase =pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf') # This is an image of 2 cats with remotes and no planes __lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') __lowercase =image_classifier(_lowerCAmelCase , candidate_labels=['cat', 'plane', 'remote']) self.assertEqual( nested_simplify(_lowerCAmelCase) , [ {'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}, ] , ) __lowercase =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2) self.assertEqual( nested_simplify(_lowerCAmelCase) , [ [ {'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}, ], ] * 5 , )
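Outside the test harness, the same pipeline can be driven directly; a short usage sketch with the checkpoint the slow tests use (the image path is illustrative):

from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
print(predictions)  # list of {"score": ..., "label": ...} dicts, as asserted above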
48
'''simple docstring''' import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _UpperCamelCase ( A ): '''simple docstring''' lowerCAmelCase__ = """bart""" lowerCAmelCase__ = ["""past_key_values"""] lowerCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : List[str] , _lowerCAmelCase : Any=5_0_2_6_5 , _lowerCAmelCase : Optional[Any]=1_0_2_4 , _lowerCAmelCase : List[Any]=1_2 , _lowerCAmelCase : Any=4_0_9_6 , _lowerCAmelCase : List[str]=1_6 , _lowerCAmelCase : List[Any]=1_2 , _lowerCAmelCase : Dict=4_0_9_6 , _lowerCAmelCase : Optional[Any]=1_6 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : str=1_0_2_4 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : str=2 , **_lowerCAmelCase : Optional[int] , ): '''simple docstring''' __lowercase =vocab_size __lowercase =max_position_embeddings __lowercase =d_model __lowercase =encoder_ffn_dim __lowercase =encoder_layers __lowercase =encoder_attention_heads __lowercase =decoder_ffn_dim __lowercase =decoder_layers __lowercase =decoder_attention_heads __lowercase =dropout __lowercase =attention_dropout __lowercase =activation_dropout __lowercase =activation_function __lowercase =init_std __lowercase =encoder_layerdrop __lowercase =decoder_layerdrop __lowercase =classifier_dropout __lowercase =use_cache __lowercase =encoder_layers __lowercase =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _lowerCAmelCase): __lowercase =self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
""" 'The config can simply be saved and uploaded again to be fixed.') class _UpperCamelCase ( A ): '''simple docstring''' @property def __lowerCamelCase ( self : List[Any]): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __lowercase =OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ]) if self.use_past: __lowercase ={0: 'batch'} __lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase ={0: 'batch', 1: 'decoder_sequence'} __lowercase ={0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs') elif self.task == "causal-lm": # TODO: figure this case out. __lowercase =OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ]) if self.use_past: __lowercase , __lowercase =self.num_layers for i in range(_lowerCAmelCase): __lowercase ={0: 'batch', 2: 'past_sequence + sequence'} __lowercase ={0: 'batch', 2: 'past_sequence + sequence'} else: __lowercase =OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ]) return common_inputs @property def __lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __lowercase =super().outputs else: __lowercase =super(_lowerCAmelCase , self).outputs if self.use_past: __lowercase , __lowercase =self.num_layers for i in range(_lowerCAmelCase): __lowercase ={0: 'batch', 2: 'past_sequence + sequence'} __lowercase ={0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ): '''simple docstring''' __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) # Generate decoder inputs __lowercase =seq_length if not self.use_past else 1 __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) __lowercase ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} __lowercase =dict(**_lowerCAmelCase , **_lowerCAmelCase) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch __lowercase , __lowercase =common_inputs['input_ids'].shape __lowercase =common_inputs['decoder_input_ids'].shape[1] __lowercase , __lowercase =self.num_attention_heads __lowercase =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase =decoder_seq_length + 3 __lowercase =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __lowercase =torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase)] , dim=1) __lowercase =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered __lowercase , __lowercase 
=self.num_layers __lowercase =min(_lowerCAmelCase , _lowerCAmelCase) __lowercase =max(_lowerCAmelCase , _lowerCAmelCase) - min_num_layers __lowercase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(_lowerCAmelCase): common_inputs["past_key_values"].append( ( torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase), )) # TODO: test this. __lowercase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(_lowerCAmelCase , _lowerCAmelCase): common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase))) return common_inputs def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ): '''simple docstring''' __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch __lowercase , __lowercase =common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase =seqlen + 2 __lowercase , __lowercase =self.num_layers __lowercase , __lowercase =self.num_attention_heads __lowercase =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __lowercase =common_inputs['attention_mask'].dtype __lowercase =torch.cat( [common_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1) __lowercase =[ (torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(_lowerCAmelCase) ] return common_inputs def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ): '''simple docstring''' __lowercase =compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase =tokenizer.num_special_tokens_to_add(_lowerCAmelCase) __lowercase =compute_effective_axis_dimension( _lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase) # Generate dummy inputs according to compute batch and sequence __lowercase =[' '.join([tokenizer.unk_token]) * seq_length] * batch_size __lowercase =dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase)) return common_inputs def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase) elif self.task == "causal-lm": __lowercase =self._generate_dummy_inputs_for_causal_lm( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , 
is_pair=_lowerCAmelCase , framework=_lowerCAmelCase) else: __lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase) return common_inputs def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any]): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __lowercase =super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) else: __lowercase =super(_lowerCAmelCase , self)._flatten_past_key_values_( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
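A quick sketch of instantiating the config above with a reduced footprint for experimentation; the small sizes are arbitrary, not recommended values:

from transformers import BartConfig, BartForConditionalGeneration

config = BartConfig(d_model=64, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=128, decoder_ffn_dim=128)
model = BartForConditionalGeneration(config)
print(sum(p.numel() for p in model.parameters()))  # tiny model for smoke tests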
48
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable A : int = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ["DPTFeatureExtractor"] A : str = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[int] = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A__ : def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> Union[str, Any]: '''simple docstring''' A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = num_labels A_ = num_choices A_ = scope A_ = self.vocab_size - 1 def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) A_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Any: '''simple docstring''' A_ = OpenAIGPTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ ) A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) A_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> List[Any]: '''simple docstring''' A_ = OpenAIGPTLMHeadModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ ( self , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> int: '''simple docstring''' A_ = OpenAIGPTDoubleHeadsModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' A_ = self.num_labels A_ = OpenAIGPTForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class A__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ): lowercase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowercase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowercase = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]: '''simple docstring''' A_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ , ) A_ = inputs_dict["""labels"""] A_ = inputs_dict["""labels"""] A_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase__ , ) A_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ ) return inputs_dict def snake_case_ ( self ) -> Union[str, Any]: '''simple docstring''' A_ = OpenAIGPTModelTester(self ) A_ = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=37 ) def snake_case_ ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase__ ) def snake_case_ ( self ) -> Optional[int]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCamelCase__ ) def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase__ ) def snake_case_ ( self ) -> List[str]: '''simple docstring''' A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase__ ) @slow def snake_case_ ( self ) -> List[Any]: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = OpenAIGPTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class A__ ( unittest.TestCase ): @slow def snake_case_ ( self ) -> Tuple: '''simple docstring''' A_ = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(UpperCamelCase__ ) A_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCamelCase__ ) # the president is A_ = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ ) self.assertListEqual(output_ids[0].tolist() , UpperCamelCase__ )
162
0
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # Saving a model while it is still in BetterTransformer form must fail.
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
101
"""Evaluate a postfix (reverse Polish) expression of integers."""
from __future__ import annotations


def evaluate_postfix(postfix_notation: list[str]) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[int] = []

    for token in postfix_notation:
        if token in operations:
            # The top of the stack is the right-hand operand.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncating towards zero.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
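A quick check of the evaluator, using the function name from the cleaned-up version above, on (5 + 3) * 2 written in postfix:

print(evaluate_postfix("5 3 + 2 *".split()))  # -> 16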
101
1
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['image_processor', 'tokenizer'] lowercase__ = 'OwlViTImageProcessor' lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , __a=None , __a=None , **__a) -> int: '''simple docstring''' _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __a , ) _UpperCamelCase = kwargs.pop('''feature_extractor''') _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(__a , __a) def __call__( self , __a=None , __a=None , __a=None , __a="max_length" , __a="np" , **__a) -> List[str]: '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''') if text is not None: if isinstance(__a , __a) or (isinstance(__a , __a) and not isinstance(text[0] , __a)): _UpperCamelCase = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a)] elif isinstance(__a , __a) and isinstance(text[0] , __a): _UpperCamelCase = [] # Maximum number of queries across batch _UpperCamelCase = max([len(__a) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__a) != max_num_queries: _UpperCamelCase = t + [''' '''] * (max_num_queries - len(__a)) _UpperCamelCase = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a) encodings.append(__a) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''') if return_tensors == "np": _UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0) _UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0) else: raise ValueError('''Target return tensor type could not be returned''') _UpperCamelCase = BatchEncoding() _UpperCamelCase = input_ids _UpperCamelCase = attention_mask if query_images is not None: _UpperCamelCase = BatchEncoding() _UpperCamelCase = self.image_processor( __a , return_tensors=__a , **__a).pixel_values _UpperCamelCase = query_pixel_values if images is not None: _UpperCamelCase = self.image_processor(__a , 
return_tensors=__a , **__a) if text is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__a) , tensor_type=__a) def UpperCAmelCase ( self , *__a , **__a) -> Any: '''simple docstring''' return self.image_processor.post_process(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> List[Any]: '''simple docstring''' return self.image_processor.post_process_object_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Tuple: '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Union[str, Any]: '''simple docstring''' return self.tokenizer.batch_decode(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> int: '''simple docstring''' return self.tokenizer.decode(*__a , **__a) @property def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , ) return self.image_processor_class @property def UpperCAmelCase ( self) -> str: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , ) return self.image_processor
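A minimal sketch of the processor above in use for text-conditioned detection; the image path is illustrative:

from PIL import Image

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open("cats.png")
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)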
194
"""simple docstring""" class _UpperCAmelCase: def __init__( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = {} def UpperCAmelCase ( self) -> None: '''simple docstring''' print(self.vertex) for i in self.vertex: print(__a , ''' -> ''' , ''' -> '''.join([str(__a) for j in self.vertex[i]])) def UpperCAmelCase ( self , __a , __a) -> None: '''simple docstring''' # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__a) else: # else make a new vertex _UpperCamelCase = [to_vertex] def UpperCAmelCase ( self) -> None: '''simple docstring''' # visited array for storing already visited nodes _UpperCamelCase = [False] * len(self.vertex) # call the recursive helper function for i in range(len(self.vertex)): if not visited[i]: self.dfs_recursive(__a , __a) def UpperCAmelCase ( self , __a , __a) -> None: '''simple docstring''' # mark start vertex as visited _UpperCamelCase = True print(__a , end=''' ''') # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__a , __a) if __name__ == "__main__": _a = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("""DFS:""") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
194
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor A : Union[str, Any] = logging.get_logger(__name__) class _UpperCamelCase ( __lowercase ): '''simple docstring''' def __init__( self , *__a , **__a ): warnings.warn( "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DeiTImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
363
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : List[str] = { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json" ), "distilbert-base-uncased-finetuned-sst-2-english": ( "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json" ), } class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : List[str] ="""distilbert""" __UpperCAmelCase : int ={ """hidden_size""": """dim""", """num_attention_heads""": """n_heads""", """num_hidden_layers""": """n_layers""", } def __init__( self , __a=3_05_22 , __a=5_12 , __a=False , __a=6 , __a=12 , __a=7_68 , __a=4 * 7_68 , __a=0.1 , __a=0.1 , __a="gelu" , __a=0.0_2 , __a=0.1 , __a=0.2 , __a=0 , **__a , ): __lowerCAmelCase = vocab_size __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = sinusoidal_pos_embds __lowerCAmelCase = n_layers __lowerCAmelCase = n_heads __lowerCAmelCase = dim __lowerCAmelCase = hidden_dim __lowerCAmelCase = dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation __lowerCAmelCase = initializer_range __lowerCAmelCase = qa_dropout __lowerCAmelCase = seq_classif_dropout super().__init__(**__a , pad_token_id=__a ) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' @property def snake_case ( self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
259
0
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
221
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __UpperCamelCase : str = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys __UpperCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
106
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
184
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : str = tempfile.mkdtemp() lowerCAmelCase__ : List[Any] = 8 # DPR tok lowerCAmelCase__ : int = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok lowerCAmelCase__ : str = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase__ : List[Any] = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase__ : Any = {"""unk_token""": """<unk>"""} lowerCAmelCase__ : str = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) lowerCAmelCase__ : Any = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def UpperCAmelCase_ ( self ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) ) def UpperCAmelCase_ ( self ) -> Any: shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : Any = os.path.join(self.tmpdirname ,"""rag_tokenizer""" ) lowerCAmelCase__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) lowerCAmelCase__ : str = 
RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(__UpperCAmelCase ) rag_tokenizer.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = RagTokenizer.from_pretrained(__UpperCAmelCase ,config=__UpperCAmelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,__UpperCAmelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,__UpperCAmelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" ) lowerCAmelCase__ : Optional[Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] lowerCAmelCase__ : Dict = tokenizer(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : str = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" ) lowerCAmelCase__ : str = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] lowerCAmelCase__ : Tuple = tokenizer(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
184
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
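A sketch exercising the RoPE-scaling validation above: a linear factor of 2.0 passes, while an unknown scaling type is rejected:

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted

try:
    GPTNeoXConfig(rope_scaling={"type": "exponential", "factor": 2.0})
except ValueError as err:
    print(err)  # rejected by _rope_scaling_validation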
79
"""Sum-square difference: (1 + 2 + ... + n)^2 - (1^2 + 2^2 + ... + n^2)."""


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
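Worked check for n = 10: the sum of squares is 385, the square of the sum is 55² = 3025, so the difference is 2640:

assert solution(10) == 2640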
79
1
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
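The command above is normally reached through the accelerate CLI; a sketch of equivalent in-process use, assuming the file lives at its usual module path `accelerate.commands.env` (the config path is a placeholder):

# Shell usage:
#   accelerate env
#   accelerate env --config_file path/to/config.yaml
from accelerate.commands.env import env_command, env_command_parser

args = env_command_parser().parse_args(["--config_file", "path/to/config.yaml"])  # placeholder path
env_command(args)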
360
"""simple docstring""" import os from datetime import datetime as dt from github import Github __UpperCamelCase : int = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase = g.get_repo('huggingface/diffusers' ) lowerCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
309
0
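A runnable sketch of the environment-report pattern in the row above, reduced to the standard library plus numpy; the function and field names here are illustrative, not accelerate's actual API:

import platform

import numpy as np


def collect_env_info() -> dict:
    # A few of the facts the env command above gathers; extend as needed
    # (the real command also reports torch, psutil and config-file details).
    return {
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
    }


if __name__ == "__main__":
    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join(f"- {prop}: {val}" for prop, val in collect_env_info().items()))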
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =256 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =cva.imread(lowerCAmelCase, 0 ) lowerCamelCase_ =copy.deepcopy(self.img ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =plt.hist(self.img.ravel(), 256, [0, 256], label='''x''' ) lowerCamelCase_ =np.sum(lowerCAmelCase ) for i in range(len(lowerCAmelCase ) ): lowerCamelCase_ =x[i] / self.k self.sk += prk lowerCamelCase_ =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ =int(last % last ) lowerCamelCase_ =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase ) lowerCamelCase_ =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ =self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ =self.last_list[num] cva.imwrite('''output_data/output.jpg''', self.img ) def lowercase__ ( self ): """simple docstring""" plt.hist(self.img.ravel(), 256, [0, 256] ) def lowercase__ ( self ): """simple docstring""" cva.imshow('''Output-Image''', self.img ) cva.imshow('''Input-Image''', self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": a_ : str = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") a_ : Optional[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
75
'''simple docstring''' import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging a_ : List[Any] = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) a_ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name def a_ ( ) -> List[str]: """simple docstring""" lowerCamelCase_ ='''https://pypi.org/pypi/diffusers/json''' lowerCamelCase_ =json.loads(request.urlopen(__snake_case ).read() )['''releases'''].keys() return sorted(__snake_case , key=lambda __snake_case : version.Version(__snake_case ) ) def a_ ( ) -> str: """simple docstring""" # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(__snake_case ) os.makedirs(__snake_case , exist_ok=__snake_case ) lowerCamelCase_ =Path(__snake_case ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def a_ ( __snake_case : Union[str, os.PathLike] ) -> List[str]: """simple docstring""" init_hf_modules() lowerCamelCase_ =Path(__snake_case ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__snake_case , exist_ok=__snake_case ) lowerCamelCase_ =dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def a_ ( __snake_case : Tuple ) -> List[str]: """simple docstring""" with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =f.read() # Imports of the form `import .xxx` lowerCamelCase_ =re.findall('''^\s*import\s+\.(\S+)\s*$''' , __snake_case , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __snake_case , flags=re.MULTILINE ) # Unique-ify return list(set(__snake_case ) ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =False lowerCamelCase_ =[module_file] lowerCamelCase_ =[] # Let's recurse through all relative imports while not no_change: lowerCamelCase_ =[] for f in files_to_check: new_imports.extend(get_relative_imports(__snake_case ) ) lowerCamelCase_ =Path(__snake_case ).parent lowerCamelCase_ =[str(module_path / m ) for m in new_imports] lowerCamelCase_ =[f for f in new_import_files if f not in all_relative_imports] lowerCamelCase_ =[F'''{f}.py''' for f in new_import_files] lowerCamelCase_ =len(__snake_case ) == 0 all_relative_imports.extend(__snake_case ) return all_relative_imports def a_ ( __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =f.read() # Imports of the form `import xxx` lowerCamelCase_ =re.findall('''^\s*import\s+(\S+)\s*$''' , __snake_case , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __snake_case , flags=re.MULTILINE ) # Only keep the top-level module lowerCamelCase_ =[imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all lowerCamelCase_ =list(set(__snake_case ) ) lowerCamelCase_ =[] for imp 
in imports: try: importlib.import_module(__snake_case ) except ImportError: missing_packages.append(__snake_case ) if len(__snake_case ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' F'''{', '.join(__snake_case )}. Run `pip install {' '.join(__snake_case )}`''' ) return get_relative_imports(__snake_case ) def a_ ( __snake_case : Tuple , __snake_case : Tuple ) -> List[Any]: """simple docstring""" lowerCamelCase_ =module_path.replace(os.path.sep , '''.''' ) lowerCamelCase_ =importlib.import_module(__snake_case ) if class_name is None: return find_pipeline_class(__snake_case ) return getattr(__snake_case , __snake_case ) def a_ ( __snake_case : Dict ) -> Any: """simple docstring""" from ..pipelines import DiffusionPipeline lowerCamelCase_ =dict(inspect.getmembers(__snake_case , inspect.isclass ) ) lowerCamelCase_ =None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __snake_case ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:''' F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in''' F''' {loaded_module}.''' ) lowerCamelCase_ =cls return pipeline_class def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =str(__snake_case ) lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) if os.path.isfile(__snake_case ): lowerCamelCase_ =module_file_or_url lowerCamelCase_ ='''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: lowerCamelCase_ =get_diffusers_versions() # cut ".dev0" lowerCamelCase_ ='''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: lowerCamelCase_ =latest_version if latest_version[1:] in available_versions else '''main''' logger.info(F'''Defaulting to latest_version: {revision}.''' ) elif revision in available_versions: lowerCamelCase_ =F'''v{revision}''' elif revision == "main": lowerCamelCase_ =revision else: raise ValueError( F'''`custom_revision`: {revision} does not exist. 
Please make sure to choose one of''' F''' {', '.join(available_versions + ['main'] )}.''' ) # community pipeline on GitHub lowerCamelCase_ =COMMUNITY_PIPELINES_URL.format(revision=__snake_case , pipeline=__snake_case ) try: lowerCamelCase_ =cached_download( __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , ) lowerCamelCase_ ='''git''' lowerCamelCase_ =pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise else: try: # Load from URL or cache if already cached lowerCamelCase_ =hf_hub_download( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , ) lowerCamelCase_ =os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' ) raise # Check we have all the requirements in our environment lowerCamelCase_ =check_imports(__snake_case ) # Now we move the module inside our cached dynamic modules. lowerCamelCase_ =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__snake_case ) lowerCamelCase_ =Path(__snake_case ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__snake_case , submodule_path / module_file ) for module_needed in modules_needed: lowerCamelCase_ =F'''{module_needed}.py''' shutil.copy(os.path.join(__snake_case , __snake_case ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =use_auth_token elif use_auth_token is True: lowerCamelCase_ =HfFolder.get_token() else: lowerCamelCase_ =None lowerCamelCase_ =model_info(__snake_case , revision=__snake_case , token=__snake_case ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
lowerCamelCase_ =submodule_path / commit_hash lowerCamelCase_ =full_submodule + os.path.sep + commit_hash create_dynamic_module(__snake_case ) if not (submodule_path / module_file).exists(): shutil.copy(__snake_case , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __snake_case , F'''{module_needed}.py''' , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) return os.path.join(__snake_case , __snake_case ) def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[str] = None , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Optional[int] , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_cached_module_file( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) return get_class_in_module(__snake_case , final_module.replace('''.py''' , '''''' ) )
75
1
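The ConstantStretch class in the row above is histogram equalization under mangled names: per-level probabilities are accumulated into a cumulative distribution, and each gray level is remapped to roughly (L - 1) * CDF. A compact numpy-only sketch of that mapping (names are mine; the OpenCV/matplotlib I/O is omitted):

import numpy as np


def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    # Histogram -> cumulative distribution -> per-level lookup table.
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / hist.sum()
    lut = np.round((levels - 1) * cdf).astype(img.dtype)
    return lut[img]


if __name__ == "__main__":
    demo = np.array([[0, 64], [128, 255]], dtype=np.uint8)
    print(equalize(demo))  # each level moves toward its CDF position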
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ ={'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ =[ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
364
from math import pi, sqrt def __UpperCamelCase ( lowerCAmelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) if num > 1_71.5: raise OverflowError('''math range error''' ) elif num - int(lowerCAmelCase__ ) not in (0, 0.5): raise NotImplementedError('''num must be an integer or a half-integer''' ) elif num == 0.5: return sqrt(lowerCAmelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def __UpperCamelCase ( ): assert gamma(0.5 ) == sqrt(lowerCAmelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() lowercase__ =1.0 while num: lowercase__ =float(input('Gamma of: ')) print(F"""gamma({num}) = {gamma(num)}""") print('\nEnter 0 to exit...')
90
0
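The gamma snippet above evaluates Gamma only at positive integers and half-integers, via the recurrence Gamma(num) = (num - 1) * Gamma(num - 1) with base cases Gamma(1) = 1 and Gamma(1/2) = sqrt(pi). The same logic with readable names (a sketch of the row's algorithm, not a general gamma function):

from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    if num == 0.5:
        return sqrt(pi)
    return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


assert gamma(5) == 24.0  # Gamma(n) = (n - 1)! for positive integers
assert gamma(1.5) == 0.5 * sqrt(pi)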
from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": __A : List[str] = input('Enter image url: ').strip() print(F"""Downloading image from {url} ...""") __A : List[str] = BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image __A : List[Any] = soup.find('meta', {'property': 'og:image'})['content'] __A : Union[str, Any] = requests.get(image_url).content __A : Any = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
154
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Tuple = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[Any] = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
154
1
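The scraper above carries the dataset's usual identifier mangling (`bsa` for what is presumably `bs4`). A hedged sketch of its core lookup with the real package names, assuming `requests` and `beautifulsoup4` are installed; `page_url` is a placeholder for the user-supplied URL:

import requests
from bs4 import BeautifulSoup


def og_image_url(page_url: str) -> str:
    # The preview image URL sits in the content attribute of the first
    # <meta property="og:image"> tag.
    soup = BeautifulSoup(requests.get(page_url).content, "html.parser")
    return soup.find("meta", {"property": "og:image"})["content"]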
import os def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Union[str, Any] = len(grid[0] ) __snake_case : Tuple = len(lowerCamelCase__ ) __snake_case : Union[str, Any] = 0 __snake_case : int = 0 __snake_case : Optional[int] = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(lowerCamelCase__ ): for j in range(n_rows - 3 ): __snake_case : str = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] __snake_case : List[Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: __snake_case : int = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: __snake_case : List[Any] = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) __snake_case : Any = max( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if max_product > largest: __snake_case : Tuple = max_product return largest def lowerCAmelCase_ ( ): __snake_case : List[Any] = [] with open(os.path.dirname(lowerCamelCase__ ) + "/grid.txt" ) as file: for line in file: grid.append(line.strip("\n" ).split(" " ) ) __snake_case : Dict = [[int(lowerCamelCase__ ) for i in grid[j]] for j in range(len(lowerCamelCase__ ) )] return largest_product(lowerCamelCase__ ) if __name__ == "__main__": print(solution())
353
from ..utils import DummyObject, requires_backends class a (metaclass=_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : int = ["speech"] def __init__( self : List[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[Any] ) -> Dict: requires_backends(self , ["speech"] ) class a (metaclass=_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["speech"] def __init__( self : int , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]: requires_backends(self , ["speech"] )
134
0
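The Project Euler snippet above scans a grid for the greatest product of four adjacent numbers along a row, column, or either diagonal. The same scan written direction-generically, run on a tiny hardcoded grid instead of grid.txt (a sketch; names are mine):

def largest_line_product(grid: list[list[int]], k: int = 4) -> int:
    n = len(grid)
    best = 0
    for i in range(n):
        for j in range(n):
            # Right, down, down-right and down-left runs of length k.
            for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
                if 0 <= i + (k - 1) * di < n and 0 <= j + (k - 1) * dj < n:
                    product = 1
                    for step in range(k):
                        product *= grid[i + step * di][j + step * dj]
                    best = max(best, product)
    return best


assert largest_line_product([[1, 2], [3, 4]], k=2) == 12  # bottom row: 3 * 4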
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = ['''image_processor''', '''tokenizer'''] A__ = '''ViltImageProcessor''' A__ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__(self : List[Any] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : int=None , **_UpperCAmelCase : Tuple ) -> str: """simple docstring""" lowercase__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _UpperCAmelCase , ) lowercase__ = kwargs.pop("""feature_extractor""" ) lowercase__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ = self.image_processor def __call__(self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Optional[int] , ) -> BatchEncoding: """simple docstring""" lowercase__ = self.tokenizer( text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , ) # add pixel_values + pixel_mask lowercase__ = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) encoding.update(_UpperCAmelCase ) return encoding def lowerCamelCase__ (self : Any , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Any: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase__ (self : int , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ) -> Dict: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCamelCase__ (self : List[str] ) -> Optional[int]: """simple docstring""" lowercase__ = self.tokenizer.model_input_names lowercase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCamelCase__ (self : int ) -> int: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be 
removed in v5. Use `image_processor_class` instead.""" , _UpperCAmelCase , ) return self.image_processor_class @property def lowerCamelCase__ (self : Any ) -> List[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _UpperCAmelCase , ) return self.image_processor
305
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase__ = tmp_path / """file.csv""" lowercase__ = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(__magic_name__ , """w""" ) as f: f.write(__magic_name__ ) return str(__magic_name__ ) @pytest.fixture def UpperCamelCase ( __magic_name__ : str ) -> Tuple: """simple docstring""" lowercase__ = tmp_path / """malformed_file.csv""" lowercase__ = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(__magic_name__ , """w""" ) as f: f.write(__magic_name__ ) return str(__magic_name__ ) @pytest.fixture def UpperCamelCase ( __magic_name__ : List[Any] , __magic_name__ : List[str] ) -> str: """simple docstring""" lowercase__ = tmp_path / """csv_with_image.csv""" lowercase__ = textwrap.dedent( f'''\ image {image_file} ''' ) with open(__magic_name__ , """w""" ) as f: f.write(__magic_name__ ) return str(__magic_name__ ) @pytest.fixture def UpperCamelCase ( __magic_name__ : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase__ = tmp_path / """csv_with_label.csv""" lowercase__ = textwrap.dedent( """\ label good bad good """ ) with open(__magic_name__ , """w""" ) as f: f.write(__magic_name__ ) return str(__magic_name__ ) @pytest.fixture def UpperCamelCase ( __magic_name__ : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ = tmp_path / """csv_with_int_list.csv""" lowercase__ = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(__magic_name__ , """w""" ) as f: f.write(__magic_name__ ) return str(__magic_name__ ) def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Optional[Any]: """simple docstring""" lowercase__ = Csv() lowercase__ = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(__magic_name__ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(__magic_name__ ) in record.message for record in caplog.records ) @require_pil def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" with open(__magic_name__ , encoding="""utf-8""" ) as f: lowercase__ = f.read().splitlines()[1] lowercase__ = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) lowercase__ = csv._generate_tables([[csv_file_with_image]] ) lowercase__ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() lowercase__ = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> str: """simple docstring""" with open(__magic_name__ , encoding="""utf-8""" ) as f: lowercase__ = f.read().splitlines()[1:] lowercase__ = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) lowercase__ = csv._generate_tables([[csv_file_with_label]] ) lowercase__ = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() lowercase__ = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(__magic_name__ ) for 
label in labels] def UpperCamelCase ( __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" lowercase__ = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda __magic_name__ : [int(__magic_name__ ) for i in x.split()]} ) lowercase__ = csv._generate_tables([[csv_file_with_int_list]] ) lowercase__ = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) lowercase__ = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
305
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
364
"""simple docstring""" import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __A : Union[str, Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __A : Tuple = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __A : List[str] = spec.loader.load_module() __A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)") __A : List[str] = { "CLIPConfigMixin", "DecisionTransformerConfigMixin", "EncoderDecoderConfigMixin", "RagConfigMixin", "SpeechEncoderDecoderConfigMixin", "VisionEncoderDecoderConfigMixin", "VisionTextDualEncoderConfigMixin", } def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): _UpperCAmelCase = False # source code of `config_class` _UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` _UpperCAmelCase , _UpperCAmelCase = checkpoint # verify the checkpoint name corresponds to the checkpoint link _UpperCAmelCase = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: _UpperCAmelCase = True break _UpperCAmelCase = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
326
0
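The check_config_docstrings script above is regex-driven: it pulls every [name](https://huggingface.co/...) pair out of a config class's source and verifies that the link is exactly https://huggingface.co/{name}. The extraction step in isolation, on a made-up one-line docstring:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

source = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
for ckpt_name, ckpt_link in _re_checkpoint.findall(source):
    assert ckpt_link == f"https://huggingface.co/{ckpt_name}"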
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''allenai/led-base-16384''': 1_63_84, } class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = LEDTokenizer SCREAMING_SNAKE_CASE__ = ["""input_ids""", """attention_mask"""] def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ): super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop("""type""" ) ) UpperCamelCase__ = add_prefix_space UpperCamelCase__ = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCamelCase__ = """post_processor""" UpperCamelCase__ = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: UpperCamelCase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase__ = tuple(state["""sep"""] ) if "cls" in state: UpperCamelCase__ = tuple(state["""cls"""] ) UpperCamelCase__ = False if state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase__ = add_prefix_space UpperCamelCase__ = True if state.get("""trim_offsets""" , SCREAMING_SNAKE_CASE_ ) != trim_offsets: UpperCamelCase__ = trim_offsets UpperCamelCase__ = True if changes_to_apply: UpperCamelCase__ = 
getattr(SCREAMING_SNAKE_CASE_ , state.pop("""type""" ) ) UpperCamelCase__ = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def UpperCAmelCase_ (self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value UpperCamelCase__ = value def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): UpperCamelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase__ = [self.sep_token_id] UpperCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = super()._pad( encoded_inputs=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding_strategy=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase__ = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase__ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
UpperCamelCase__ = len(encoded_inputs["""global_attention_mask"""] ) != len(SCREAMING_SNAKE_CASE_ ) if needs_to_be_padded: UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase__ = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase__ = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
244
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''', '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''', '''uclanlp/visualbert-vqa-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''', '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''', '''uclanlp/visualbert-vcr-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json''' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """visual_bert""" def __init__(self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = vocab_size UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = hidden_size UpperCamelCase__ = visual_embedding_dim UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = initializer_range UpperCamelCase__ = type_vocab_size UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = bypass_transformer UpperCamelCase__ = special_visual_initialize
244
1
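The LED tokenizer subclass above exists largely for its _pad override: after the usual padding, global_attention_mask is padded with -1 on whichever side the tokenizer pads, because 0 is already taken (it means "local attention", not "ignore"). That rule as a standalone sketch with names of my own:

def pad_global_attention_mask(mask: list[int], target_len: int, side: str = "right") -> list[int]:
    difference = target_len - len(mask)
    if difference <= 0:
        return mask
    # Pad with -1: 0 already means "local attention" rather than "don't attend".
    if side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask


assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0], 4, side="left") == [-1, -1, 1, 0]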
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand A_ = ( '''4S 3H 2C 7S 5H''', '''9D 8H 2C 6S 7H''', '''2D 6D 9D TH 7D''', '''TC 8C 2S JH 6C''', '''JH 8S TH AH QH''', '''TS KS 5S 9S AC''', '''KD 6S 9D TH AD''', '''KS 8D 4D 9S 4S''', # pair '''8C 4S KH JS 4D''', # pair '''QH 8H KD JH 8S''', # pair '''KC 4H KS 2H 8D''', # pair '''KD 4S KC 3H 8S''', # pair '''AH 8S AS KC JH''', # pair '''3H 4C 4H 3S 2H''', # 2 pairs '''5S 5D 2C KH KH''', # 2 pairs '''3C KH 5D 5S KH''', # 2 pairs '''AS 3C KH AD KH''', # 2 pairs '''7C 7S 3S 7H 5S''', # 3 of a kind '''7C 7S KH 2H 7H''', # 3 of a kind '''AC KH QH AH AS''', # 3 of a kind '''2H 4D 3C AS 5S''', # straight (low ace) '''3C 5C 4C 2C 6H''', # straight '''6S 8S 7S 5H 9H''', # straight '''JS QS 9H TS KH''', # straight '''QC KH TS JS AH''', # straight (high ace) '''8C 9C 5C 3C TC''', # flush '''3S 8S 9S 5S KS''', # flush '''4C 5C 9C 8C KC''', # flush '''JH 8H AH KH QH''', # flush '''3D 2H 3H 2C 2D''', # full house '''2H 2C 3S 3H 3D''', # full house '''KH KC 3S 3H 3D''', # full house '''JC 6H JS JD JH''', # 4 of a kind '''JC 7H JS JD JH''', # 4 of a kind '''JC KH JS JD JH''', # 4 of a kind '''2S AS 4S 5S 3S''', # straight flush (low ace) '''2D 6D 3D 4D 5D''', # straight flush '''5C 6C 3C 7C 4C''', # straight flush '''JH 9H TH KH QH''', # straight flush '''JH AH TH KH QH''', # royal flush (high ace straight flush) ) A_ = ( ('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''), ('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''), ('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''), ('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''), ('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''), ('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''), ('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''), ('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''), ('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''), ('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''), ('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''), ('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''), ('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''), ('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''), ('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''), ('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''), ('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''), ('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''), ('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''), ('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''), ('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''), ('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''), ('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''), ('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''), ('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''), ('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''), ('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''), ('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''), ('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''), ('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''), ) A_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', True), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', False), ('''AS 3S 4S 8S 2S''', True), 
) A_ = ( ('''2H 3H 4H 5H 6H''', True), ('''AS AH 2H AD AC''', False), ('''2H 3H 5H 6H 7H''', False), ('''KS AS TS QS JS''', True), ('''8H 9H QS JS TH''', True), ) A_ = ( ('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]), ('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]), ('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]), ('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]), ) A_ = ( ('''JH AH TH KH QH''', 0), ('''JH 9H TH KH QH''', 0), ('''JC KH JS JD JH''', 7), ('''KH KC 3S 3H 3D''', 6), ('''8C 9C 5C 3C TC''', 0), ('''JS QS 9H TS KH''', 0), ('''7C 7S KH 2H 7H''', 3), ('''3C KH 5D 5S KH''', 2), ('''QH 8H KD JH 8S''', 1), ('''2D 6D 9D TH 7D''', 0), ) A_ = ( ('''JH AH TH KH QH''', 23), ('''JH 9H TH KH QH''', 22), ('''JC KH JS JD JH''', 21), ('''KH KC 3S 3H 3D''', 20), ('''8C 9C 5C 3C TC''', 19), ('''JS QS 9H TS KH''', 18), ('''7C 7S KH 2H 7H''', 17), ('''3C KH 5D 5S KH''', 16), ('''QH 8H KD JH 8S''', 15), ('''2D 6D 9D TH 7D''', 14), ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Dict = randrange(len(__UpperCAmelCase ) ), randrange(len(__UpperCAmelCase ) ) _snake_case : Optional[int] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] _snake_case : int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def UpperCAmelCase__ (snake_case__ : Tuple = 1_00 ): """simple docstring""" return (generate_random_hand() for _ in range(__UpperCAmelCase )) @pytest.mark.parametrize("""hand, expected""" , __UpperCAmelCase ) def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] ): """simple docstring""" assert PokerHand(__UpperCAmelCase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , __UpperCAmelCase ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int ): """simple docstring""" assert PokerHand(__UpperCAmelCase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , __UpperCAmelCase ) def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] ): """simple docstring""" _snake_case : Tuple = PokerHand(__UpperCAmelCase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , __UpperCAmelCase ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Dict ): """simple docstring""" assert PokerHand(__UpperCAmelCase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , __UpperCAmelCase ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" assert PokerHand(__UpperCAmelCase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , __UpperCAmelCase ) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int ): """simple docstring""" assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] ): """simple docstring""" assert PokerHand(__UpperCAmelCase ).compare_with(PokerHand(__UpperCAmelCase ) ) == expected def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = [PokerHand(__UpperCAmelCase ) for hand in SORTED_HANDS] _snake_case : Any = poker_hands.copy() shuffle(__UpperCAmelCase ) _snake_case : Union[str, Any] = chain(sorted(__UpperCAmelCase ) ) for index, hand in enumerate(__UpperCAmelCase ): assert 
hand == poker_hands[index] def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=__UpperCAmelCase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = PokerHand("""2C 4S AS 3D 5C""" ) _snake_case : Union[str, Any] = True _snake_case : Tuple = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def UpperCAmelCase__ (): """simple docstring""" _snake_case : Tuple = 0 _snake_case : Any = os.path.abspath(os.path.dirname(__UpperCAmelCase ) ) _snake_case : List[str] = os.path.join(__UpperCAmelCase , """poker_hands.txt""" ) with open(__UpperCAmelCase ) as file_hand: for line in file_hand: _snake_case : Union[str, Any] = line[:14].strip() _snake_case : Optional[int] = line[15:].strip() _snake_case : List[Any] = PokerHand(__UpperCAmelCase ), PokerHand(__UpperCAmelCase ) _snake_case : List[str] = player.compare_with(__UpperCAmelCase ) if output == "Win": answer += 1 assert answer == 3_76
352
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ = logging.get_logger(__name__) A_ = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class lowercase( __a , __a ): '''simple docstring''' lowercase__ = "swin" lowercase__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Any, a_: List[str]=224, a_: List[Any]=4, a_: List[Any]=3, a_: Dict=96, a_: List[str]=[2, 2, 6, 2], a_: int=[3, 6, 12, 24], a_: int=7, a_: str=4.0, a_: Optional[Any]=True, a_: Dict=0.0, a_: List[Any]=0.0, a_: List[str]=0.1, a_: Union[str, Any]="gelu", a_: Dict=False, a_: Union[str, Any]=0.02, a_: Optional[int]=1E-5, a_: Optional[int]=32, a_: Tuple=None, a_: Union[str, Any]=None, **a_: Any, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : Any = image_size _snake_case : List[Any] = patch_size _snake_case : Tuple = num_channels _snake_case : str = embed_dim _snake_case : Union[str, Any] = depths _snake_case : int = len(a_ ) _snake_case : Union[str, Any] = num_heads _snake_case : List[str] = window_size _snake_case : str = mlp_ratio _snake_case : Union[str, Any] = qkv_bias _snake_case : Dict = hidden_dropout_prob _snake_case : str = attention_probs_dropout_prob _snake_case : Union[str, Any] = drop_path_rate _snake_case : Optional[int] = hidden_act _snake_case : str = use_absolute_embeddings _snake_case : Tuple = layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Optional[Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _snake_case : Any = int(embed_dim * 2 ** (len(a_ ) - 1) ) _snake_case : Any = ["""stem"""] + [f"stage{idx}" for idx in range(1, len(a_ ) + 1 )] _snake_case , _snake_case : List[str] = get_aligned_output_features_output_indices( out_features=a_, out_indices=a_, stage_names=self.stage_names ) class lowercase( __a ): '''simple docstring''' lowercase__ = version.parse("1.11" ) @property def UpperCamelCase_ ( self: Any ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return 1E-4
132
0
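Among the poker fixtures above, the subtle case is the low-ace straight: A-2-3-4-5 must count as a straight even though the ace keeps its high value of 14, which is exactly what the _is_five_high_straight tests and their card_values columns exercise. A minimal standalone version of that check:

def is_five_high_straight(card_values: list[int]) -> bool:
    # A, 5, 4, 3, 2 with the ace valued high (14) is still a straight.
    return sorted(card_values, reverse=True) == [14, 5, 4, 3, 2]


assert is_five_high_straight([5, 4, 3, 2, 14])
assert not is_five_high_straight([14, 13, 12, 11, 10])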
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : List[str] = 'speech_to_text_2' UpperCAmelCase__ : Dict = ['past_key_values'] UpperCAmelCase__ : Tuple = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self: Optional[int] , UpperCamelCase_: List[str]=1_00_00 , UpperCamelCase_: int=6 , UpperCamelCase_: Dict=20_48 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: int="relu" , UpperCamelCase_: Optional[Any]=2_56 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.02 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Any=2 , UpperCamelCase_: Optional[int]=10_24 , **UpperCamelCase_: Optional[Any] , ): __lowerCamelCase = vocab_size __lowerCamelCase = d_model __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = decoder_layerdrop __lowerCamelCase = use_cache __lowerCamelCase = decoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_target_positions super().__init__( pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
12
"""simple docstring""" # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib lowercase__ = get_logger() lowercase__ = None class __lowerCamelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ): '''simple docstring''' def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Optional[Any]=None , **a_ : Tuple ): super().__init__(features=a_ ) import jax from jaxlib.xla_client import Device if isinstance(a_ , a_ ): raise ValueError( f'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) lowerCAmelCase_ : List[Any] = device if isinstance(a_ , a_ ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowerCAmelCase_ : Dict = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) lowerCAmelCase_ : Tuple = str(jax.devices()[0] ) lowerCAmelCase_ : Dict = jnp_array_kwargs @staticmethod def lowerCamelCase ( ): import jax return {str(a_ ): device for device in jax.devices()} def lowerCamelCase ( self : Tuple , a_ : Dict ): import jax import jax.numpy as jnp if isinstance(a_ , a_ ) and column: if all( isinstance(a_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(a_ , axis=0 ) return column def lowerCamelCase ( self : Tuple , a_ : Tuple ): import jax import jax.numpy as jnp if isinstance(a_ , (str, bytes, type(a_ )) ): return value elif isinstance(a_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() lowerCAmelCase_ : Any = {} if isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: lowerCAmelCase_ : List[Any] = {"dtype": jnp.intaa} else: lowerCAmelCase_ : Tuple = {"dtype": jnp.intaa} elif isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): lowerCAmelCase_ : Optional[int] = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(a_ , PIL.Image.Image ): lowerCAmelCase_ : List[str] = np.asarray(a_ ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: lowerCAmelCase_ : Dict = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see 
https://github.com/google/jax/issues/4486 return jnp.array(a_ , **{**default_dtype, **self.jnp_array_kwargs} ) def lowerCamelCase ( self : Any , a_ : List[str] ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(a_ , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(a_ , "__array__" ) and not isinstance(a_ , jax.Array ): lowerCAmelCase_ : str = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(a_ , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] ) elif isinstance(a_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(a_ ) for substruct in data_struct] ) return self._tensorize(a_ ) def lowerCamelCase ( self : Dict , a_ : dict ): return map_nested(self._recursive_tensorize , a_ , map_list=a_ ) def lowerCamelCase ( self : List[str] , a_ : pa.Table ): lowerCAmelCase_ : Tuple = self.numpy_arrow_extractor().extract_row(a_ ) lowerCAmelCase_ : Union[str, Any] = self.python_features_decoder.decode_row(a_ ) return self.recursive_tensorize(a_ ) def lowerCamelCase ( self : Any , a_ : pa.Table ): lowerCAmelCase_ : Dict = self.numpy_arrow_extractor().extract_column(a_ ) lowerCAmelCase_ : Optional[int] = self.python_features_decoder.decode_column(a_ , pa_table.column_names[0] ) lowerCAmelCase_ : List[str] = self.recursive_tensorize(a_ ) lowerCAmelCase_ : Optional[Any] = self._consolidate(a_ ) return column def lowerCamelCase ( self : Tuple , a_ : pa.Table ): lowerCAmelCase_ : Tuple = self.numpy_arrow_extractor().extract_batch(a_ ) lowerCAmelCase_ : Tuple = self.python_features_decoder.decode_batch(a_ ) lowerCAmelCase_ : List[str] = self.recursive_tensorize(a_ ) for column_name in batch: lowerCAmelCase_ : Tuple = self._consolidate(batch[column_name] ) return batch
241
0
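For context, a minimal usage sketch of the JAX formatter above. It assumes a recent `datasets` release with JAX support installed; `with_format("jax")` is the public entry point that routes rows and batches through this formatter, and `device` plus any extra keyword arguments are forwarded to its constructor.

import jax
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})

# route formatting through the JAX formatter; the device is passed as a string identifier
ds = ds.with_format("jax", device=str(jax.devices()[0]))

batch = ds[:2]
# equally-shaped rows are stacked by _consolidate; floats default to float32 in _tensorize
print(type(batch["x"]), batch["x"].dtype)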
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Spiral primes (cf. Project Euler problem 58): return the side length of the
    square spiral at which the ratio of primes along both diagonals first falls
    below ``ratio``.
    """
    j = 3
    primes = 3  # the 3x3 spiral already has 3 primes (3, 5, 7) on its diagonals
    while primes / (2 * j - 1) >= ratio:
        # the three non-square corners of the next ring
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
82
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets a : Dict = datasets.logging.get_logger(__name__) a : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' a : int = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' a : List[Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def lowerCAmelCase_ (lowerCAmelCase__: List[str] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Dict=False , lowerCAmelCase__: List[Any]=False , lowerCAmelCase__: Any=True , lowerCAmelCase__: Union[str, Any]=False , lowerCAmelCase__: List[Any]="dummy_doc" ): """simple docstring""" UpperCAmelCase_: str = {doc: key_lines} UpperCAmelCase_: str = {doc: sys_lines} UpperCAmelCase_: Optional[Any] = {} UpperCAmelCase_: Optional[int] = 0 UpperCAmelCase_: Optional[Any] = 0 UpperCAmelCase_: str = 0 UpperCAmelCase_: List[Any] = 0 UpperCAmelCase_: Tuple = 0 UpperCAmelCase_: Union[str, Any] = 0 UpperCAmelCase_ , UpperCAmelCase_: List[str] = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ ) key_singletons_num += singletons_num if NP_only or min_span: UpperCAmelCase_: List[str] = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_: Any = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ ) sys_singletons_num += singletons_num if NP_only or min_span: UpperCAmelCase_: Tuple = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ ) if remove_nested: UpperCAmelCase_ , UpperCAmelCase_: str = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters UpperCAmelCase_: Tuple = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_: Dict = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_: Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( """Number of resulting 
singleton clusters in the key """ F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' """files, respectively""" ) return doc_coref_infos def lowerCAmelCase_ (lowerCAmelCase__: Any , lowerCAmelCase__: Dict , lowerCAmelCase__: int , lowerCAmelCase__: Any , lowerCAmelCase__: Optional[int] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: int ): """simple docstring""" UpperCAmelCase_: Tuple = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_: Any = {} UpperCAmelCase_: Tuple = 0 UpperCAmelCase_: Optional[Any] = 0 for name, metric in metrics: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , ) if conll_subparts_num == 3: UpperCAmelCase_: List[str] = (conll / 3) * 1_0_0 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({"""conll_score""": conll} ) return output_scores def lowerCAmelCase_ (lowerCAmelCase__: Optional[Any] ): """simple docstring""" UpperCAmelCase_: Dict = False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: UpperCAmelCase_: Any = line.split()[5] if not parse_col == "-": UpperCAmelCase_: List[str] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): def __snake_case (self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ), codebase_urls=["""https://github.com/ns-moosavi/coval"""], reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ], ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> int: UpperCAmelCase_: Tuple = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: UpperCAmelCase_: str = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE_ ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" UpperCAmelCase_: Tuple = evaluate( key_lines=SCREAMING_SNAKE_CASE_, sys_lines=SCREAMING_SNAKE_CASE_, metrics=SCREAMING_SNAKE_CASE_, NP_only=SCREAMING_SNAKE_CASE_, remove_nested=SCREAMING_SNAKE_CASE_, keep_singletons=SCREAMING_SNAKE_CASE_, min_span=SCREAMING_SNAKE_CASE_, ) return score
82
1
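A hand-worked check of the spiral-primes solver above. Starting from a 3x3 spiral, 3 of the 5 diagonal values (3, 5, 7 among 1, 3, 5, 7, 9) are prime, a ratio of 0.60; each pass then adds the three non-square corners of the next ring.

# ratios per side length, computed by hand:
#   side 3  -> 3 / 5   = 0.600      side 7  -> 8 / 13 ~ 0.615
#   side 5  -> 5 / 9   ~ 0.556      side 9  -> 9 / 17 ~ 0.529
#   side 11 -> 10 / 21 ~ 0.476 < 0.5
assert is_prime(13) and not is_prime(21)
assert solution(0.5) == 11  # first side length whose diagonal prime ratio drops below one half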
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
174
'''simple docstring'''


def merge_sort(collection):
    """Sort by repeatedly pulling the minimum to the front and the maximum to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
198
0
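A quick usage check for the min/max sort above: each pass moves the current minimum to the front list and the current maximum to the back list, so the concatenation comes out fully sorted (at O(n^2) cost from the repeated scans and removals).

assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([7]) == [7]  # odd length: the middle element survives the loop
assert merge_sort([]) == []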
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase_ = logging.getLogger() def snake_case( ) -> Optional[Any]: '''simple docstring''' lowercase : Dict = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase : Optional[int] = parser.parse_args() return args.f def snake_case( __magic_name__ ) -> Union[str, Any]: '''simple docstring''' lowercase : Dict = {} lowercase : List[Any] = os.path.join(__magic_name__ , '''all_results.json''' ) if os.path.exists(__magic_name__ ): with open(__magic_name__ , '''r''' ) as f: lowercase : str = json.load(__magic_name__ ) else: raise ValueError(F"""can't find {path}""" ) return results def snake_case( ) -> Optional[Any]: '''simple docstring''' lowercase : int = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _A ( _lowerCamelCase ): @classmethod def __a ( cls : str ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = tempfile.mkdtemp() lowercase : Dict = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase : Any = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def __a ( cls : Dict ) -> str: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : List[str] ) -> Tuple: """simple docstring""" lowercase : Tuple = self.get_auto_remove_tmp_dir() lowercase : str = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase : List[Any] = get_results(_A ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : List[Any] ) -> Tuple: """simple docstring""" lowercase : List[Any] = self.get_auto_remove_tmp_dir() lowercase : int = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) lowercase : Optional[int] = get_results(_A ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : Any ) -> str: """simple docstring""" lowercase : Any = self.get_auto_remove_tmp_dir() lowercase : Union[str, Any] = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowercase : Dict = get_results(_A ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : List[str] ) -> Tuple: """simple docstring""" lowercase : List[str] = 7 if get_gpu_count() > 1 else 2 lowercase : List[Any] = self.get_auto_remove_tmp_dir() lowercase : int = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowercase : Any = get_results(_A ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : Dict ) -> Dict: """simple docstring""" lowercase : List[str] = self.get_auto_remove_tmp_dir() lowercase : int = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowercase : str = get_results(_A ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : Dict ) -> List[Any]: """simple docstring""" lowercase : int = self.get_auto_remove_tmp_dir() lowercase : Optional[int] = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) lowercase : Tuple = get_results(_A ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_A , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase : Dict = self.get_auto_remove_tmp_dir() lowercase : str = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowercase : Tuple = get_results(_A ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : Optional[Any] ) -> Tuple: """simple docstring""" lowercase : List[Any] = self.get_auto_remove_tmp_dir() lowercase : str = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) lowercase : Tuple = get_results(_A ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(_A , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''translation_no_trainer''' ) ) ) @slow def __a ( self : Dict ) -> List[Any]: """simple docstring""" lowercase : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_A ) lowercase : Dict = self.get_auto_remove_tmp_dir() lowercase : List[str] = f""" 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) lowercase : int = get_results(_A ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __a ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowercase : Tuple = self.get_auto_remove_tmp_dir() lowercase : Tuple = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase : List[str] = get_results(_A ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_A , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''image_classification_no_trainer''' ) ) )
116
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden/underscore directories and "scripts" in place so os.walk skips them
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
116
1
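A sketch of the shape of `print_directory_md`'s output above, assuming a hypothetical tree containing `sorts/bubble_sort.py` and `sorts/quick_sort.py`: it emits a DIRECTORY.md-style outline with one heading per directory level and one bullet per file.

# expected output for the hypothetical tree:
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
#     * [Quick Sort](sorts/quick_sort.py)
print(repr(md_prefix(0)))  # '\n##' -> top-level heading marker
print(repr(md_prefix(1)))  # '  *'  -> bullet, one indent level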
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode 
== "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
1
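For reference, a sketch of what the `_import_structure` machinery above buys, assuming a `transformers` build that ships the RoBERTa-PreLayerNorm port: top-level imports resolve lazily through `_LazyModule`, so the heavy torch/tf/flax submodules are only loaded on first attribute access.

from transformers import RobertaPreLayerNormConfig  # resolved lazily via _LazyModule

config = RobertaPreLayerNormConfig()  # defaults mirror roberta-base sizing
print(config.model_type)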
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=24 , __lowerCAmelCase=2 , __lowerCAmelCase=6 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , __lowerCAmelCase=1000 , ) -> Union[str, Any]: lowercase__ : Optional[Any] = parent lowercase__ : Tuple = batch_size lowercase__ : List[Any] = seq_length lowercase__ : str = is_training lowercase__ : List[str] = use_input_mask lowercase__ : Any = use_token_type_ids lowercase__ : Union[str, Any] = use_labels lowercase__ : Dict = vocab_size lowercase__ : Union[str, Any] = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : Dict = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Optional[int] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[str] = type_sequence_label_size lowercase__ : List[str] = initializer_range lowercase__ : List[Any] = num_labels lowercase__ : int = scope lowercase__ : Union[str, Any] = range_bbox def _lowerCAmelCase( self ) -> List[str]: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase__ : Dict = bbox[i, j, 3] lowercase__ : Dict = bbox[i, j, 1] lowercase__ : Any = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase__ : int = bbox[i, j, 2] lowercase__ : Optional[Any] = bbox[i, j, 0] lowercase__ : Union[str, Any] = t lowercase__ : int = None if self.use_input_mask: lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowercase__ : int = None if self.use_token_type_ids: lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : int = None lowercase__ : Optional[int] = None if self.use_labels: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Union[str, Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _lowerCAmelCase( self ) -> Tuple: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Optional[Any]: lowercase__ : List[str] = LiltModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : int = model(__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowercase__ : int = model(__lowerCAmelCase , bbox=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowercase__ : Tuple = model(__lowerCAmelCase , bbox=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> Dict: lowercase__ : int = self.num_labels lowercase__ : str = LiltForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : Optional[Any] = model( __lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> str: lowercase__ : int = LiltForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : Any = model( __lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCAmelCase( self ) -> Union[str, Any]: lowercase__ : List[str] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : int = config_and_inputs lowercase__ : Dict = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class UpperCAmelCase ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: return True def _lowerCAmelCase( self ) -> int: lowercase__ : Tuple = LiltModelTester(self ) lowercase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def _lowerCAmelCase( self ) -> Optional[int]: self.config_tester.run_common_tests() def _lowerCAmelCase( self ) -> Any: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Any: lowercase__ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ : List[Any] = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> List[Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Union[str, Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) @slow def _lowerCAmelCase( self ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : int = LiltModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : Optional[int] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__lowerCAmelCase ) lowercase__ : Tuple = torch.tensor([[1, 2]] , device=__lowerCAmelCase ) lowercase__ : Any = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase ) lowercase__ : str = torch.Size([1, 2, 768] ) lowercase__ : List[Any] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__lowerCAmelCase , ) self.assertTrue(outputs.last_hidden_state.shape , __lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __lowerCAmelCase , atol=1E-3 ) )
214
'''simple docstring'''


def print_max_activities(start, finish):
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity starts at or after the finish time of the
        # previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
214
1
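Tracing the greedy selection above on its sample data, as a non-printing sketch (same assumption as the original: activities pre-sorted by finish time).

def max_activities(start, finish):
    # returns the selected indices instead of printing them
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


# start = [1, 3, 0, 5, 8, 5], finish = [2, 4, 6, 7, 9, 9]:
# 0 taken; 1 starts at 3 >= 2, taken; 2 starts at 0 < 4, skipped;
# 3 starts at 5 >= 4, taken; 4 starts at 8 >= 7, taken; 5 starts at 5 < 9, skipped
assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]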
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) lowercase__ : List[str] = logging.getLogger() def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> Dict: """simple docstring""" lowerCAmelCase_ : Dict = {} lowerCAmelCase_ : Union[str, Any] = os.path.join(lowerCAmelCase__ , 'all_results.json' ) if os.path.exists(lowerCAmelCase__ ): with open(lowerCAmelCase__ , 'r' ) as f: lowerCAmelCase_ : Any = json.load(lowerCAmelCase__ ) else: raise ValueError(f"can't find {path}" ) return results lowercase__ : Optional[int] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : Any ): import xla_spawn lowerCAmelCase_ : Union[str, Any] = self.get_auto_remove_tmp_dir() lowerCAmelCase_ : Optional[Any] = F"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , 'argv' , SCREAMING_SNAKE_CASE_ ): lowerCAmelCase_ : Optional[Any] = time() xla_spawn.main() lowerCAmelCase_ : Any = time() lowerCAmelCase_ : Dict = get_results(SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 5_0_0 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): import xla_spawn lowerCAmelCase_ : Dict = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(SCREAMING_SNAKE_CASE_ , 'argv' , SCREAMING_SNAKE_CASE_ ): xla_spawn.main()
224
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : Union[str, Any] = inspect.getfile(accelerate.test_utils ) lowerCAmelCase_ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) lowerCAmelCase_ : int = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) lowerCAmelCase_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Any ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCAmelCase_ : Optional[int] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): print(F"Found {torch.cuda.device_count()} devices." ) lowerCAmelCase_ : int = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path] print(F"Command: {cmd}" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowerCAmelCase_ : List[Any] = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) @require_multi_gpu def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" ) lowerCAmelCase_ : Any = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ : Dict = Accelerator() lowercase__ : List[Any] = (accelerator.state.process_index + 2, 1_0) lowercase__ : Any = torch.randint(0, 1_0, shape).to(accelerator.device) lowercase__ : List[Any] = """""" lowercase__ : Union[str, Any] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowercase__ : Optional[Any] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowercase__ : Any = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." 
# Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
224
1
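A single-process sketch of the `pad_across_processes` behavior the script above checks, assuming `accelerate` and `torch` are installed: ragged per-process tensors are zero-padded along dim 0 up to the largest size across processes, at the end by default or at the front with `pad_first=True`.

import torch
from accelerate import Accelerator

accelerator = Accelerator()  # a single local process here, so there is nothing to pad
tensor = torch.randint(0, 10, (accelerator.process_index + 2, 10))
padded = accelerator.pad_across_processes(tensor)
assert padded.shape[0] == tensor.shape[0]  # the max size across one process is its own size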
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image around its mean intensity."""
    width, height = image.size  # PIL reports (width, height)
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
360
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index at which ``pattern`` occurs in ``s``."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
81
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType a__ : Any = logging.get_logger(__name__) a__ : List[str] = { "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json", } class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE): """simple docstring""" snake_case__ : Union[str, Any] = "layoutlmv3" def __init__( self : Tuple , UpperCAmelCase__ : Optional[int]=5_0_2_6_5 , UpperCAmelCase__ : Dict=7_6_8 , UpperCAmelCase__ : List[Any]=1_2 , UpperCAmelCase__ : List[str]=1_2 , UpperCAmelCase__ : Dict=3_0_7_2 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Any=5_1_2 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Union[str, Any]=1E-5 , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]=0 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=1_0_2_4 , UpperCAmelCase__ : int=1_2_8 , UpperCAmelCase__ : str=1_2_8 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : int=1_2_8 , UpperCAmelCase__ : Optional[int]=6_4 , UpperCAmelCase__ : str=2_5_6 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : str=2_2_4 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : int , ) -> Dict: super().__init__( vocab_size=A_ , hidden_size=A_ , num_hidden_layers=A_ , num_attention_heads=A_ , intermediate_size=A_ , hidden_act=A_ , hidden_dropout_prob=A_ , attention_probs_dropout_prob=A_ , max_position_embeddings=A_ , type_vocab_size=A_ , initializer_range=A_ , layer_norm_eps=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ , ) __SCREAMING_SNAKE_CASE = max_ad_position_embeddings __SCREAMING_SNAKE_CASE = coordinate_size __SCREAMING_SNAKE_CASE = shape_size __SCREAMING_SNAKE_CASE = has_relative_attention_bias __SCREAMING_SNAKE_CASE = rel_pos_bins __SCREAMING_SNAKE_CASE = max_rel_pos __SCREAMING_SNAKE_CASE = has_spatial_attention_bias __SCREAMING_SNAKE_CASE = rel_ad_pos_bins __SCREAMING_SNAKE_CASE = max_rel_ad_pos __SCREAMING_SNAKE_CASE = text_embed __SCREAMING_SNAKE_CASE = visual_embed __SCREAMING_SNAKE_CASE = input_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( _SCREAMING_SNAKE_CASE): """simple docstring""" snake_case__ : Union[str, Any] = version.parse("1.12") @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), 
("pixel_values", {0: "batch", 1: "num_channels"}), ] ) @property def UpperCAmelCase_ ( self : int ) -> float: return 1E-5 @property def UpperCAmelCase_ ( self : Any ) -> int: return 1_2 def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple = -1 , UpperCAmelCase__ : List[Any] = -1 , UpperCAmelCase__ : str = False , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Dict = 3 , UpperCAmelCase__ : Optional[Any] = 4_0 , UpperCAmelCase__ : Any = 4_0 , ) -> Mapping[str, Any]: setattr(processor.image_processor , "apply_ocr" , A_ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __SCREAMING_SNAKE_CASE = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __SCREAMING_SNAKE_CASE = processor.tokenizer.num_special_tokens_to_add(A_ ) __SCREAMING_SNAKE_CASE = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence __SCREAMING_SNAKE_CASE = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __SCREAMING_SNAKE_CASE = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __SCREAMING_SNAKE_CASE = self._generate_dummy_images(A_ , A_ , A_ , A_ ) __SCREAMING_SNAKE_CASE = dict( processor( A_ , text=A_ , boxes=A_ , return_tensors=A_ , ) ) return inputs
54
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
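# Conceptual sketch of the similarity these tests are built on: exact Jaccard
# similarity over whitespace tokens. The real pipeline approximates this with
# MinHash signatures; this helper is illustrative only and not part of the module.
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb) if sa | sb else 0.0


assert jaccard("a " * 20, "a " * 30) == 1.0  # why the first two test repos cluster together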
222
0
'''simple docstring'''


def solution() -> int:
    """Project Euler 40: product of selected digits of the Champernowne constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
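# The same digits can be found without materializing a million characters. A sketch
# of direct digit lookup in the Champernowne constant (assumes 1-indexed n):
def champernowne_digit(n: int) -> int:
    digits, first, count = 1, 1, 9
    # Skip past the blocks of 1-digit, 2-digit, ... numbers until n lands in one.
    while n > digits * count:
        n -= digits * count
        first *= 10
        count *= 10
        digits += 1
    number = first + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])


assert champernowne_digit(12) == 1  # "123456789101112..." -> 12th digit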
367
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class a_ ( _lowerCAmelCase ): def __init__( self : Any , lowercase : int , lowercase : Union[str, Any]=13 , lowercase : List[str]=7 , lowercase : List[str]=True , lowercase : int=True , lowercase : Tuple=True , lowercase : int=True , lowercase : List[Any]=99 , lowercase : Optional[int]=32 , lowercase : Dict=5 , lowercase : Optional[int]=4 , lowercase : List[str]=37 , lowercase : Tuple="gelu" , lowercase : List[Any]=0.1 , lowercase : Tuple=0.1 , lowercase : List[str]=512 , lowercase : str=16 , lowercase : Tuple=2 , lowercase : List[Any]=0.02 , lowercase : Dict=False , lowercase : Dict=True , lowercase : int="None" , lowercase : Optional[Any]=3 , lowercase : Dict=4 , lowercase : List[Any]=None , ): """simple docstring""" lowercase_ :int = parent lowercase_ :str = batch_size lowercase_ :Tuple = seq_length lowercase_ :Union[str, Any] = is_training lowercase_ :Dict = use_input_mask lowercase_ :Any = use_token_type_ids lowercase_ :Tuple = use_labels lowercase_ :Dict = vocab_size lowercase_ :Tuple = hidden_size lowercase_ :Union[str, Any] = num_hidden_layers lowercase_ :int = num_attention_heads lowercase_ :List[Any] = intermediate_size lowercase_ :Tuple = hidden_act lowercase_ :str = hidden_dropout_prob lowercase_ :Any = attention_probs_dropout_prob lowercase_ :List[Any] = max_position_embeddings lowercase_ :Union[str, Any] = type_vocab_size lowercase_ :Union[str, Any] = type_sequence_label_size lowercase_ :Any = initializer_range lowercase_ :List[Any] = num_labels lowercase_ :str = num_choices lowercase_ :Optional[Any] = relative_attention lowercase_ :Tuple = position_biased_input lowercase_ :Union[str, Any] = pos_att_type lowercase_ :Tuple = scope def lowercase__ ( self : Dict ): """simple docstring""" lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ :Union[str, Any] = None if self.use_input_mask: lowercase_ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowercase_ :List[Any] = None if self.use_token_type_ids: lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase_ :str = None lowercase_ :Union[str, Any] = None lowercase_ :List[str] = None if self.use_labels: lowercase_ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase_ :str = ids_tensor([self.batch_size] , self.num_choices ) lowercase_ :Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Optional[Any] ): """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowercase__ ( self : Optional[int] ): """simple docstring""" lowercase_ :Dict = self.get_config() lowercase_ :Optional[Any] = 300 return config def lowercase__ ( self : Optional[Any] , lowercase : Dict ): """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowercase__ ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str , lowercase : Optional[Any] , lowercase : Optional[int] ): """simple docstring""" lowercase_ :str = DebertaModel(config=lowercase ) model.to(lowercase ) model.eval() lowercase_ :Optional[int] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )[0] lowercase_ :Union[str, Any] = model(lowercase , token_type_ids=lowercase )[0] lowercase_ :Dict = model(lowercase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowercase__ ( self : Dict , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Tuple , lowercase : Dict , lowercase : Dict , lowercase : str , lowercase : Tuple ): """simple docstring""" lowercase_ :Dict = DebertaForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() lowercase_ :Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : List[Any] , lowercase : int , lowercase : Dict ): """simple docstring""" lowercase_ :Dict = self.num_labels lowercase_ :int = DebertaForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() lowercase_ :Union[str, Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowercase ) def lowercase__ ( self : List[Any] , lowercase : List[str] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : int ): """simple docstring""" lowercase_ :List[str] = self.num_labels lowercase_ :Optional[int] = DebertaForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() lowercase_ :Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Union[str, Any] , lowercase : Tuple , lowercase : Any , lowercase : List[Any] , lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[Any] ): """simple docstring""" lowercase_ :Any = DebertaForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() lowercase_ :List[Any] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , 
end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :Optional[int] = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) :List[str] = config_and_inputs lowercase_ :Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class a_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __A = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __A = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) __A = True __A = False __A = False __A = False __A = False def lowercase__ ( self : Optional[int] ): """simple docstring""" lowercase_ :List[Any] = DebertaModelTester(self ) lowercase_ :str = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def lowercase__ ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowercase ) def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase ) def lowercase__ ( self : Union[str, Any] ): """simple docstring""" lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase ) def lowercase__ ( self : int ): """simple docstring""" lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowercase ) def lowercase__ ( self : Tuple ): """simple docstring""" lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowercase ) @slow def lowercase__ ( self : Optional[int] ): """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ :Tuple = DebertaModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_torch @require_sentencepiece @require_tokenizers class a_ ( unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def lowercase__ ( self : Dict ): """simple docstring""" pass @slow def lowercase__ ( self : List[Any] ): """simple docstring""" lowercase_ :Optional[Any] = DebertaModel.from_pretrained("microsoft/deberta-base" ) lowercase_ :Dict = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) lowercase_ :Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase_ :Optional[int] = model(lowercase , attention_mask=lowercase )[0] # compare the actual values for a slice. 
lowercase_ :List[Any] = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
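# A minimal inference sketch matching the integration test above (assumes network
# access to the "microsoft/deberta-base" checkpoint on the Hugging Face Hub).
import torch
from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")
inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # (1, sequence_length, 768)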
147
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits: non-positive -> 0, positive -> 1.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
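# A hedged usage sketch of the underlying model without the tool wrapper (assumes
# a transformers install with CLIPSeg support and network access; the image path
# is hypothetical).
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("photo.jpg")  # hypothetical input image
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask = (logits > 0).float()  # same binarization as decode() above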
13
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __snake_case ( lowerCamelCase_ , unittest.TestCase ): lowerCAmelCase_ = KandinskyVaaImgaImgPipeline lowerCAmelCase_ = ["image_embeds", "negative_image_embeds", "image"] lowerCAmelCase_ = [ "image_embeds", "negative_image_embeds", "image", ] lowerCAmelCase_ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowerCAmelCase_ = False @property def __a ( self : Union[str, Any] ): """simple docstring""" return 32 @property def __a ( self : Union[str, Any] ): """simple docstring""" return 32 @property def __a ( self : Optional[Any] ): """simple docstring""" return self.time_input_dim @property def __a ( self : Optional[int] ): """simple docstring""" return self.time_input_dim * 4 @property def __a ( self : List[str] ): """simple docstring""" return 1_00 @property def __a ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**_lowercase ) return model @property def __a ( self : str ): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __a ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs ) return model def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dummy_unet SCREAMING_SNAKE_CASE__ = self.dummy_movq SCREAMING_SNAKE_CASE__ = { """num_train_timesteps""": 10_00, """beta_schedule""": """linear""", """beta_start""": 0.0_00_85, """beta_end""": 0.0_12, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } SCREAMING_SNAKE_CASE__ = DDIMScheduler(**_lowercase ) SCREAMING_SNAKE_CASE__ = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __a ( self : Optional[Any] 
, _lowercase : Any , _lowercase : Tuple=0 ): """simple docstring""" SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase ) SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowercase ) # create init_image SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((2_56, 2_56) ) if str(_lowercase ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ = torch.manual_seed(_lowercase ) else: SCREAMING_SNAKE_CASE__ = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) SCREAMING_SNAKE_CASE__ = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """cpu""" SCREAMING_SNAKE_CASE__ = self.get_dummy_components() SCREAMING_SNAKE_CASE__ = self.pipeline_class(**_lowercase ) SCREAMING_SNAKE_CASE__ = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(_lowercase ) ) SCREAMING_SNAKE_CASE__ = output.images SCREAMING_SNAKE_CASE__ = pipe( **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0] SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ = np.array( [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): def __a ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) SCREAMING_SNAKE_CASE__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) SCREAMING_SNAKE_CASE__ = """A red cartoon frog, 4k""" SCREAMING_SNAKE_CASE__ = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(_lowercase ) SCREAMING_SNAKE_CASE__ = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe_prior( _lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() 
SCREAMING_SNAKE_CASE__ = pipeline( image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE__ = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_lowercase , _lowercase )
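# How the strength=0.2 used above typically maps onto the scheduler: img2img
# pipelines re-run only the tail of the denoising schedule. This sketch follows
# the common diffusers convention and is stated as an assumption about this
# pipeline's internals, not a quote of them.
def img2img_start_step(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return max(num_inference_steps - init_timestep, 0)


assert img2img_start_step(100, 0.2) == 80  # strength=0.2 -> only the last 20 steps run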
219
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case_ = logging.get_logger(__name__) def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' lowercase__ : int = DPTConfig() if "large" in checkpoint_url: lowercase__ : Union[str, Any] = 1_024 lowercase__ : Dict = 4_096 lowercase__ : str = 24 lowercase__ : Optional[int] = 16 lowercase__ : Union[str, Any] = [5, 11, 17, 23] lowercase__ : str = [256, 512, 1_024, 1_024] lowercase__ : int = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ : Tuple = True lowercase__ : List[Any] = 150 lowercase__ : List[str] = 'huggingface/label-files' lowercase__ : List[str] = 'ade20k-id2label.json' lowercase__ : Dict = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) ) , 'r' ) ) lowercase__ : Tuple = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ : Any = idalabel lowercase__ : List[str] = {v: k for k, v in idalabel.items()} lowercase__ : List[Any] = [1, 150, 480, 480] return config, expected_shape def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' lowercase__ : Optional[Any] = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ : Tuple = name.replace('pretrained.model' , 'dpt.encoder' ) if "pretrained.model" in name: lowercase__ : str = name.replace('pretrained.model' , 'dpt.embeddings' ) if "patch_embed" in name: lowercase__ : Optional[Any] = name.replace('patch_embed' , 'patch_embeddings' ) if "pos_embed" in name: lowercase__ : int = name.replace('pos_embed' , 'position_embeddings' ) if "attn.proj" in name: lowercase__ : Dict = name.replace('attn.proj' , 'attention.output.dense' ) if "proj" in name and "project" not in name: lowercase__ : str = name.replace('proj' , 'projection' ) if "blocks" in name: lowercase__ : List[Any] = name.replace('blocks' , 'layer' ) if "mlp.fc1" in name: lowercase__ : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowercase__ : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' ) if "norm1" in name: lowercase__ : List[Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowercase__ : Dict = name.replace('norm2' , 'layernorm_after' ) if "scratch.output_conv" in name: lowercase__ : int = name.replace('scratch.output_conv' , 'head' ) if "scratch" in name: lowercase__ : List[Any] = name.replace('scratch' , 'neck' ) if "layer1_rn" in name: lowercase__ : int = name.replace('layer1_rn' , 'convs.0' ) if "layer2_rn" in name: lowercase__ : List[Any] = name.replace('layer2_rn' , 'convs.1' ) if "layer3_rn" in name: lowercase__ : List[str] = name.replace('layer3_rn' , 'convs.2' ) if "layer4_rn" in name: lowercase__ : List[str] = name.replace('layer4_rn' , 'convs.3' ) if "refinenet" in name: lowercase__ : List[Any] = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 
1 to 3 lowercase__ : Optional[int] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: lowercase__ : int = name.replace('out_conv' , 'projection' ) if "resConfUnit1" in name: lowercase__ : Any = name.replace('resConfUnit1' , 'residual_layer1' ) if "resConfUnit2" in name: lowercase__ : List[str] = name.replace('resConfUnit2' , 'residual_layer2' ) if "conv1" in name: lowercase__ : List[Any] = name.replace('conv1' , 'convolution1' ) if "conv2" in name: lowercase__ : str = name.replace('conv2' , 'convolution2' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' ) if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ : Dict = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' ) if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ : Optional[int] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' ) if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ : str = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ : List[Any] = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' ) if "pretrained.act_postprocess1.4" in name: lowercase__ : Tuple = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' ) if "pretrained.act_postprocess2.3" in name: lowercase__ : str = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' ) if "pretrained.act_postprocess2.4" in name: lowercase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' ) if "pretrained.act_postprocess3.3" in name: lowercase__ : Dict = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' ) if "pretrained.act_postprocess4.3" in name: lowercase__ : Optional[int] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' ) if "pretrained.act_postprocess4.4" in name: lowercase__ : Union[str, Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' ) if "pretrained" in name: lowercase__ : Dict = name.replace('pretrained' , 'dpt' ) if "bn" in name: lowercase__ : Tuple = name.replace('bn' , 'batch_norm' ) if "head" in name: lowercase__ : List[Any] = name.replace('head' , 'head.head' ) if "encoder.norm" in name: lowercase__ : Optional[Any] = name.replace('encoder.norm' , 'layernorm' ) if "auxlayer" in name: lowercase__ : Optional[int] = name.replace('auxlayer' , 'auxiliary_head.head' ) return name def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ : Any = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) lowercase__ : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__ : List[Any] = in_proj_weight[: config.hidden_size, :] lowercase__ : Any = in_proj_bias[: config.hidden_size] lowercase__ : Any = in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] lowercase__ : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ : List[Any] = in_proj_weight[ -config.hidden_size :, : ] lowercase__ : List[str] = in_proj_bias[-config.hidden_size :] def snake_case__ ( ): '''simple docstring''' lowercase__ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def snake_case__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' lowercase__ : Any = get_dpt_config(SCREAMING_SNAKE_CASE_ ) # load original state_dict from URL lowercase__ : List[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' ) # remove certain keys remove_ignore_keys_(SCREAMING_SNAKE_CASE_ ) # rename keys for key in state_dict.copy().keys(): lowercase__ : str = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ : List[str] = val # read in qkv matrices read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model lowercase__ : List[Any] = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() # Check outputs on an image lowercase__ : Tuple = 480 if 'ade' in checkpoint_url else 384 lowercase__ : Tuple = DPTImageProcessor(size=SCREAMING_SNAKE_CASE_ ) lowercase__ : Optional[int] = prepare_img() lowercase__ : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) # forward pass lowercase__ : Optional[int] = model(**SCREAMING_SNAKE_CASE_ ).logits if 'ade' in checkpoint_url else model(**SCREAMING_SNAKE_CASE_ ).predicted_depth # Assert logits lowercase__ : Tuple = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: lowercase__ : List[Any] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(SCREAMING_SNAKE_CASE_ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , SCREAMING_SNAKE_CASE_ ) ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print('Pushing model to hub...' 
) model.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) image_processor.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE_ , ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) snake_case_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
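# The fused-QKV split performed in read_in_q_k_v above, in miniature: a (3*h, h)
# in-projection matrix is sliced into three (h, h) blocks for query, key and value.
import torch

h = 4
in_proj_weight = torch.arange(3 * h * h, dtype=torch.float32).reshape(3 * h, h)
q = in_proj_weight[:h, :]
k = in_proj_weight[h : 2 * h, :]
v = in_proj_weight[-h:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)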
366
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns a list of all left and right truncations of ``n``."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the list of the first ``count`` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
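# Sanity check: the eleven truncatable primes are known to be
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, so solution() is 748317.
assert sum([23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397]) == 748317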
216
0
'''simple docstring''' A_ : Dict = { """Pillow""": """Pillow""", """accelerate""": """accelerate>=0.11.0""", """compel""": """compel==0.1.8""", """black""": """black~=23.1""", """datasets""": """datasets""", """filelock""": """filelock""", """flax""": """flax>=0.4.1""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.13.2""", """requests-mock""": """requests-mock==1.10.0""", """importlib_metadata""": """importlib_metadata""", """invisible-watermark""": """invisible-watermark""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2""", """jaxlib""": """jaxlib>=0.1.65""", """Jinja2""": """Jinja2""", """k-diffusion""": """k-diffusion>=0.0.12""", """torchsde""": """torchsde""", """note_seq""": """note_seq""", """librosa""": """librosa""", """numpy""": """numpy""", """omegaconf""": """omegaconf""", """parameterized""": """parameterized""", """protobuf""": """protobuf>=3.20.3,<4""", """pytest""": """pytest""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """ruff""": """ruff>=0.0.241""", """safetensors""": """safetensors""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """scipy""": """scipy""", """onnx""": """onnx""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """tensorboard""": """tensorboard""", """torch""": """torch>=1.4""", """torchvision""": """torchvision""", """transformers""": """transformers>=4.25.1""", """urllib3""": """urllib3<=2.0.0""", }
215
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A_ : List[str] = logging.get_logger(__name__) A_ : Optional[Any] = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } A_ : Dict = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def snake_case_ ( lowerCAmelCase_ )-> int: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = EfficientNetConfig() _UpperCAmelCase : List[Any] = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase : str = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase : int = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase : Optional[Any] = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : List[str] = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase : List[str] = """huggingface/label-files""" _UpperCAmelCase : Optional[Any] = """imagenet-1k-id2label.json""" _UpperCAmelCase : List[str] = 1000 _UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) ) _UpperCAmelCase : Dict = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _UpperCAmelCase : List[str] = idalabel _UpperCAmelCase : str = {v: k for k, v in idalabel.items()} return config def snake_case_ ( )-> Tuple: '''simple docstring''' _UpperCAmelCase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase : Tuple = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im def snake_case_ ( lowerCAmelCase_ )-> Tuple: '''simple 
docstring''' _UpperCAmelCase : str = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : Tuple = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=lowerCAmelCase_ , ) return preprocessor def snake_case_ ( lowerCAmelCase_ )-> List[str]: '''simple docstring''' _UpperCAmelCase : int = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase : Optional[int] = sorted(set(lowerCAmelCase_ ) ) _UpperCAmelCase : str = len(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = {b: str(lowerCAmelCase_ ) for b, i in zip(lowerCAmelCase_ , range(lowerCAmelCase_ ) )} _UpperCAmelCase : List[str] = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase : Any = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', 
F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase : Union[str, Any] = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase : str = """efficientnet.""" + item[1] _UpperCAmelCase : Optional[Any] = """classifier.weight""" _UpperCAmelCase : Optional[Any] = """classifier.bias""" return key_mapping def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase : Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase : Optional[int] = torch.from_numpy(lowerCAmelCase_ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: _UpperCAmelCase : Any = torch.from_numpy(lowerCAmelCase_ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: _UpperCAmelCase : int = torch.from_numpy(np.transpose(lowerCAmelCase_ ) ) else: _UpperCAmelCase : List[str] = torch.from_numpy(lowerCAmelCase_ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowerCAmelCase_ ) @torch.no_grad() def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = model_classes[model_name]( include_top=lowerCAmelCase_ , weights="""imagenet""" , input_tensor=lowerCAmelCase_ , input_shape=lowerCAmelCase_ , pooling=lowerCAmelCase_ , classes=1000 , classifier_activation="""softmax""" , ) _UpperCAmelCase : List[str] = original_model.trainable_variables _UpperCAmelCase : Any = original_model.non_trainable_variables _UpperCAmelCase : Optional[int] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase : Dict = param.numpy() _UpperCAmelCase : Optional[Any] = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase : List[Any] = get_efficientnet_config(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(lowerCAmelCase_ ).eval() _UpperCAmelCase : int = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase : Optional[int] = rename_keys(lowerCAmelCase_ ) replace_params(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Initialize preprocessor and preprocess input image _UpperCAmelCase : str = convert_image_processor(lowerCAmelCase_ ) _UpperCAmelCase : List[str] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase : List[str] = hf_model(**lowerCAmelCase_ ) _UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase : int = False _UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : Dict = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) _UpperCAmelCase : Optional[Any] = image.img_to_array(lowerCAmelCase_ ) _UpperCAmelCase : str = np.expand_dims(lowerCAmelCase_ , axis=0 ) _UpperCAmelCase : str = 
original_model.predict(lowerCAmelCase_ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowerCAmelCase_ ): os.mkdir(lowerCAmelCase_ ) # Save converted model and image processor hf_model.save_pretrained(lowerCAmelCase_ ) preprocessor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase : List[Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowerCAmelCase_ ) hf_model.push_to_hub(lowerCAmelCase_ ) if __name__ == "__main__": A_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") A_ : Optional[Any] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
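# The TF -> PyTorch kernel layout change applied in replace_params above, in
# miniature: TF stores conv kernels as (kh, kw, in, out), PyTorch expects
# (out, in, kh, kw), hence the .permute(3, 2, 0, 1).
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)       # (kh, kw, in, out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # (out, in, kh, kw)
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)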
215
1
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> int:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()

    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
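# Note the ordering rule above: f_cost is the heuristic alone, which is what makes
# this greedy best-first; A* would instead sort by g_cost + heuristic. A sketch of
# the two priorities for a node reached in 3 steps:
def manhattan(ax: int, ay: int, bx: int, by: int) -> int:
    return abs(ax - bx) + abs(ay - by)


greedy_priority = manhattan(2, 1, 6, 6)      # h only, as in Node.__lt__ above
a_star_priority = 3 + manhattan(2, 1, 6, 6)  # g + h, the A* ordering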
262
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __A ( unittest.TestCase ):
    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        lowerCamelCase =Vector()

    def _snake_case ( self ):
        lowerCamelCase =Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(UpperCAmelCase_ ) , """(0,0,0,0,0,1)""" )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2, 3, 4] )
        self.assertEqual(len(UpperCAmelCase_ ) , 4 )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2] )
        lowerCamelCase =Vector([1, 2, 3, 4, 5] )
        lowerCamelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        lowerCamelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2, 3] )
        lowerCamelCase =Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2, 3] )
        lowerCamelCase =Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2, 3] )
        lowerCamelCase =Vector([2, -1, 4] )  # for test of dot product
        lowerCamelCase =Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
        self.assertEqual((a * b) , 0 )

    def _snake_case ( self ):
        self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )

    def _snake_case ( self ):
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 2, 3] )
        lowerCamelCase =Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , UpperCAmelCase_ , UpperCAmelCase_ ) ) , """(3,4,7)""" )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 0, 0, 0, 0, 0] )
        lowerCamelCase =x.copy()
        self.assertEqual(str(UpperCAmelCase_ ) , str(UpperCAmelCase_ ) )

    def _snake_case ( self ):
        lowerCamelCase =Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(UpperCAmelCase_ ) , """(0,1,0)""" )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        lowerCamelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase_ , UpperCAmelCase_ ) )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        lowerCamelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase_ , UpperCAmelCase_ ) )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        lowerCamelCase =Vector([1, 2, 3] )
        self.assertEqual("""(14,32,50)""" , str(a * x ) )
        self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )

    def _snake_case ( self ):
        lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )

    def _snake_case ( self ):
        self.assertEqual(
            """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" ,
            str(square_zero_matrix(5 ) ) ,
        )


if __name__ == "__main__":
    unittest.main()
262
1