code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowercase__ ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=1 / 255 , SCREAMING_SNAKE_CASE=True , ) -> Dict: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _lowerCamelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : Union[str, Any] = min_resolution _lowerCamelCase : Optional[int] = max_resolution _lowerCamelCase : List[Any] = do_resize _lowerCamelCase : str = size _lowerCamelCase : Union[str, Any] = do_normalize _lowerCamelCase : Union[str, Any] = image_mean _lowerCamelCase : Tuple = image_std _lowerCamelCase : List[Any] = do_rescale _lowerCamelCase : Dict = rescale_factor _lowerCamelCase : Any = do_pad def UpperCamelCase_ ( self) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase_ ( self , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Any: if not batched: _lowerCamelCase : Tuple = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE , Image.Image): _lowerCamelCase , _lowerCamelCase : Any = image.size else: _lowerCamelCase , _lowerCamelCase : int = image.shape[1], image.shape[2] if w < h: _lowerCamelCase : Optional[int] = int(self.size["""shortest_edge"""] * h / w) _lowerCamelCase : Optional[int] = self.size["""shortest_edge"""] elif w > h: _lowerCamelCase : Any = self.size["""shortest_edge"""] _lowerCamelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * w / h) else: _lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""] _lowerCamelCase : Optional[Any] = self.size["""shortest_edge"""] else: _lowerCamelCase : Any = [] for image in image_inputs: _lowerCamelCase , _lowerCamelCase : List[str] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) _lowerCamelCase : List[Any] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE: item[0])[0] _lowerCamelCase : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE: item[1])[1] return expected_height, expected_width @require_torch @require_vision class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = ConditionalDetrImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = ConditionalDetrImageProcessingTester(self) @property def UpperCamelCase_ ( self) -> str: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , 
"""size""")) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333}) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84}) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: pass def UpperCamelCase_ ( self) -> Any: # Initialize image_processing _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict) # create random PIL images _lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image) # Test not batched input _lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase_ ( self) -> int: # Initialize image_processing _lowerCamelCase : Dict = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray) # Test not batched input _lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Any = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase_ ( self) -> Optional[Any]: # Initialize image_processing _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor) # Test not batched input _lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCamelCase : Optional[Any] = 
image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values _lowerCamelCase , _lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCamelCase_ ( self) -> str: # prepare image and target _lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f: _lowerCamelCase : List[Any] = json.loads(f.read()) _lowerCamelCase : Union[str, Any] = {"""image_id""": 3_9769, """annotations""": target} # encode them _lowerCamelCase : Any = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""") _lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="""pt""") # verify pixel values _lowerCamelCase : Dict = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)) # verify area _lowerCamelCase : List[str] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE)) # verify boxes _lowerCamelCase : Union[str, Any] = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3)) # verify 
image_id _lowerCamelCase : Union[str, Any] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE)) # verify is_crowd _lowerCamelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE)) # verify class_labels _lowerCamelCase : Any = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE)) # verify orig_size _lowerCamelCase : List[str] = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE)) # verify size _lowerCamelCase : Tuple = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE)) @slow def UpperCamelCase_ ( self) -> Optional[Any]: # prepare image, target and masks_path _lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f: _lowerCamelCase : Optional[int] = json.loads(f.read()) _lowerCamelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} _lowerCamelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""") # encode them _lowerCamelCase : List[str] = ConditionalDetrImageProcessor(format="""coco_panoptic""") _lowerCamelCase : Dict = image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="""pt""") # verify pixel values _lowerCamelCase : int = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81]) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 
0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)) # verify area _lowerCamelCase : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE)) # verify boxes _lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3)) # verify image_id _lowerCamelCase : List[str] = torch.tensor([3_9769]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE)) # verify is_crowd _lowerCamelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE)) # verify class_labels _lowerCamelCase : str = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE)) # verify masks _lowerCamelCase : Any = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , SCREAMING_SNAKE_CASE) # verify orig_size _lowerCamelCase : List[Any] = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE)) # verify size _lowerCamelCase : Optional[Any] = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE))
88
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
1
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = IFInpaintingSuperResolutionPipeline __UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} __UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) __UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase_ ( self) -> List[Any]: return self._get_superresolution_dummy_components() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> Optional[Any]: if str(SCREAMING_SNAKE_CASE).startswith("""mps"""): _lowerCamelCase : str = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != 
"""cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCamelCase_ ( self) -> Any: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def UpperCamelCase_ ( self) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def UpperCamelCase_ ( self) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1) def UpperCamelCase_ ( self) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def UpperCamelCase_ ( self) -> Dict: self._test_save_load_local() def UpperCamelCase_ ( self) -> int: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
88
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
1
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Optional[int]: _lowerCamelCase : Tuple = parent _lowerCamelCase : List[Any] = 13 _lowerCamelCase : Union[str, Any] = 7 _lowerCamelCase : Optional[Any] = True _lowerCamelCase : List[Any] = True _lowerCamelCase : Tuple = True _lowerCamelCase : int = True _lowerCamelCase : Any = 99 _lowerCamelCase : Optional[int] = 384 _lowerCamelCase : Tuple = 2 _lowerCamelCase : List[Any] = 4 _lowerCamelCase : Any = 37 _lowerCamelCase : Any = """gelu""" _lowerCamelCase : Optional[Any] = 0.1 _lowerCamelCase : Optional[Any] = 0.1 _lowerCamelCase : Optional[Any] = 512 _lowerCamelCase : Any = 16 _lowerCamelCase : int = 2 _lowerCamelCase : Any = 0.02 _lowerCamelCase : Tuple = 3 _lowerCamelCase : Optional[int] = 4 
_lowerCamelCase : Dict = 128 _lowerCamelCase : List[str] = 2 _lowerCamelCase : int = 9 _lowerCamelCase : Optional[Any] = 1 _lowerCamelCase : Any = None def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : Tuple = None if self.use_input_mask: _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Dict = None if self.use_token_type_ids: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _lowerCamelCase : Any = None _lowerCamelCase : str = None _lowerCamelCase : List[Any] = None if self.use_labels: _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices) _lowerCamelCase : Union[str, Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = TFConvBertModel(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = {"""input_ids""": input_ids, 
"""attention_mask""": input_mask, """token_type_ids""": token_type_ids} _lowerCamelCase : int = [input_ids, input_mask] _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : List[str] = TFConvBertForMaskedLM(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Tuple = self.num_labels _lowerCamelCase : Optional[int] = TFConvBertForSequenceClassification(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Dict: _lowerCamelCase : Optional[Any] = self.num_choices _lowerCamelCase : Optional[int] = TFConvBertForMultipleChoice(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1)) _lowerCamelCase : 
Union[str, Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1)) _lowerCamelCase : Dict = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1)) _lowerCamelCase : Dict = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]: _lowerCamelCase : str = self.num_labels _lowerCamelCase : Optional[int] = TFConvBertForTokenClassification(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : List[str] = TFConvBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } _lowerCamelCase : str = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : int = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( 
_lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Union[str, Any] = config_and_inputs _lowerCamelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __UpperCAmelCase = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : List[str] = TFConvBertModelTester(self) _lowerCamelCase : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37) def UpperCamelCase_ ( self) -> List[Any]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : List[str] = True _lowerCamelCase : Union[str, Any] = True if hasattr(SCREAMING_SNAKE_CASE , """use_cache"""): _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length) _lowerCamelCase : Optional[Any] = getattr(self.model_tester , """key_length""" , SCREAMING_SNAKE_CASE) for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = len(model(SCREAMING_SNAKE_CASE)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , """saved_model""" , """1""") _lowerCamelCase : str = tf.keras.models.load_model(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE) if self.is_encoder_decoder: _lowerCamelCase : Any = outputs["""encoder_hidden_states"""] _lowerCamelCase : Union[str, Any] = outputs["""encoder_attentions"""] else: _lowerCamelCase : Tuple = outputs["""hidden_states"""] _lowerCamelCase : List[Any] = outputs["""attentions"""] self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] 
= getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE) self.assertListEqual( list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Tuple = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""") self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Tuple = True _lowerCamelCase : List[str] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length) _lowerCamelCase : int = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length) _lowerCamelCase : List[str] = getattr(self.model_tester , """key_length""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = getattr(self.model_tester , """key_length""" , SCREAMING_SNAKE_CASE) def check_decoder_attentions_output(SCREAMING_SNAKE_CASE): _lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE) self.assertEqual(out_len % 2 , 0) _lowerCamelCase : List[str] = outputs.decoder_attentions self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(SCREAMING_SNAKE_CASE) , 
self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Tuple = False _lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) _lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE) self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE) check_encoder_attentions_output(SCREAMING_SNAKE_CASE) if self.is_encoder_decoder: _lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE) check_decoder_attentions_output(SCREAMING_SNAKE_CASE) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) self.assertEqual(config.output_hidden_states , SCREAMING_SNAKE_CASE) check_encoder_attentions_output(SCREAMING_SNAKE_CASE) # Check attention is always last and order is fine _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(SCREAMING_SNAKE_CASE)) self.assertEqual(model.config.output_hidden_states , SCREAMING_SNAKE_CASE) check_encoder_attentions_output(SCREAMING_SNAKE_CASE) @require_tf class lowercase__ ( unittest.TestCase ): @slow def 
UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : List[Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""") _lowerCamelCase : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]]) _lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE)[0] _lowerCamelCase : str = [1, 6, 768] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ]) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)
88
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
1
"""simple docstring""" from sklearn.metrics import fa_score import datasets UpperCAmelCase = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ UpperCAmelCase = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. 
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. 
])} """ UpperCAmelCase = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""")), """references""": datasets.Sequence(datasets.Value("""int32""")), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32"""), """references""": datasets.Value("""int32"""), }) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE="binary" , SCREAMING_SNAKE_CASE=None) -> Optional[int]: _lowerCamelCase : List[Any] = fa_score( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , pos_label=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE , sample_weight=SCREAMING_SNAKE_CASE) return {"f1": float(SCREAMING_SNAKE_CASE) if score.size == 1 else score}
88
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
1
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
1
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
1
"""simple docstring""" UpperCAmelCase = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] UpperCAmelCase = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] UpperCAmelCase = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] UpperCAmelCase = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] UpperCAmelCase = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 
220, 219, 176, 175, 132, 131, 88, 44, 0, ] UpperCAmelCase = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] UpperCAmelCase = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] UpperCAmelCase = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
88
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
1
"""simple docstring""" from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class lowercase__ ( A_ ): def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> str: _lowerCamelCase : str = path_or_paths _lowerCamelCase : Optional[Any] = split if split or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else """train""" _lowerCamelCase : int = features _lowerCamelCase : Any = cache_dir _lowerCamelCase : Optional[Any] = keep_in_memory _lowerCamelCase : Optional[int] = streaming _lowerCamelCase : Union[str, Any] = num_proc _lowerCamelCase : Union[str, Any] = kwargs @abstractmethod def UpperCamelCase_ ( self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: pass class lowercase__ ( A_ ): def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> Tuple: _lowerCamelCase : Tuple = features _lowerCamelCase : Any = cache_dir _lowerCamelCase : int = keep_in_memory _lowerCamelCase : Dict = streaming _lowerCamelCase : List[str] = num_proc _lowerCamelCase : List[Any] = kwargs @abstractmethod def UpperCamelCase_ ( self) -> Union[Dataset, IterableDataset]: pass
88
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
1
"""simple docstring""" from collections import Counter from timeit import timeit def _snake_case ( __snake_case : str = "" , ): """simple docstring""" return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2 def _snake_case ( __snake_case : str = "" ): """simple docstring""" if len(__snake_case ) == 0: return True _lowerCamelCase : Union[str, Any] = input_str.replace(""" """ , """""" ).lower() # character_freq_dict: Stores the frequency of every character in the input string _lowerCamelCase : dict[str, int] = {} for character in lower_case_input_str: _lowerCamelCase : str = character_freq_dict.get(__snake_case , 0 ) + 1 _lowerCamelCase : Optional[int] = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def _snake_case ( __snake_case : str = "" ): """simple docstring""" print("""\nFor string = """ , __snake_case , """:""" ) print( """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(__snake_case ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) print( """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(__snake_case ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) if __name__ == "__main__": UpperCAmelCase = input( """Enter string to determine if it can be rearranged as a palindrome or not: """ ).strip() benchmark(check_str) UpperCAmelCase = can_string_be_rearranged_as_palindrome_counter(check_str) print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
88
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = StableDiffusionSAGPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Optional[Any]: torch.manual_seed(0) _lowerCamelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _lowerCamelCase : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) _lowerCamelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0) _lowerCamelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
_lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") _lowerCamelCase : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]: if str(SCREAMING_SNAKE_CASE).startswith("""mps"""): _lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""") _lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = """.""" _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Dict = output.images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 
0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = """.""" _lowerCamelCase : List[str] = torch.manual_seed(0) _lowerCamelCase : int = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Any = output.images _lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = """.""" _lowerCamelCase : Union[str, Any] = torch.manual_seed(0) _lowerCamelCase : Optional[int] = sag_pipe( [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images assert image.shape == (1, 512, 768, 3)
88
1
"""simple docstring""" import datasets from .evaluate import evaluate UpperCAmelCase = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ UpperCAmelCase = """ This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. """ UpperCAmelCase = """ Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': the text of the answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the SQuAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. 
Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_metric = datasets.load_metric(\"squad\") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")}, """references""": { """id""": datasets.Value("""string"""), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string"""), """answer_start""": datasets.Value("""int32"""), }), }, }) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Tuple: _lowerCamelCase : Dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} _lowerCamelCase : Optional[Any] = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] _lowerCamelCase : str = evaluate(dataset=SCREAMING_SNAKE_CASE , predictions=SCREAMING_SNAKE_CASE) return score
88
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
1
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _snake_case ( __snake_case : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Optional[int] = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(__snake_case , __snake_case , bias=__snake_case ) _lowerCamelCase : Dict = emb.weight.data return lin_layer def _snake_case ( __snake_case : Tuple , __snake_case : List[str]=None ): """simple docstring""" _lowerCamelCase : Any = {} for old_key in state_dict.keys(): _lowerCamelCase : Optional[int] = old_key if "moe_layer.experts." in key: if expert_idx is not None: _lowerCamelCase : Optional[Any] = key.replace("""moe_layer.experts.0""" , F'ffn.experts.expert_{expert_idx}' ) else: _lowerCamelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: _lowerCamelCase : Tuple = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: _lowerCamelCase : Any = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: _lowerCamelCase : Tuple = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." 
in key: _lowerCamelCase : str = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: _lowerCamelCase : Optional[int] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: _lowerCamelCase : Any = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) _lowerCamelCase : Optional[Any] = state_dict[old_key] return new_dict def _snake_case ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : List[str] , __snake_case : List[str] , __snake_case : str = WEIGHTS_NAME ): """simple docstring""" _lowerCamelCase : str = [] _lowerCamelCase : Dict = 0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): _lowerCamelCase : Tuple = switch_checkpoint_path + F'-rank-{expert}.pt' if os.path.isfile(__snake_case ): _lowerCamelCase : int = torch.load(__snake_case )["""model"""] remove_ignore_keys_(__snake_case ) _lowerCamelCase : int = rename_fairseq_keys(__snake_case , __snake_case ) _lowerCamelCase : Union[str, Any] = os.path.join( __snake_case , weights_name.replace(""".bin""" , F'-{len(__snake_case )+1:05d}-of-???.bin' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block _lowerCamelCase : List[Any] = os.path.join(__snake_case , weights_name.replace(""".bin""" , F'-{len(__snake_case )+1:05d}-of-???.bin' ) ) _lowerCamelCase : Dict = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(__snake_case ) _lowerCamelCase : Optional[int] = rename_fairseq_keys(__snake_case , __snake_case ) _lowerCamelCase : List[Any] = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if 
len(__snake_case ) == 1: _lowerCamelCase : Dict = os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index _lowerCamelCase : str = {} for idx, shard in enumerate(__snake_case ): _lowerCamelCase : Optional[int] = weights_name.replace(""".bin""" , F'-{idx+1:05d}-of-{len(__snake_case ):05d}.bin' ) _lowerCamelCase : List[str] = os.path.join(__snake_case , weights_name.replace(""".bin""" , F'-{idx+1:05d}-of-???.bin' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: _lowerCamelCase : List[str] = shard_file # Add the metadata _lowerCamelCase : Optional[int] = {"""total_size""": total_size} _lowerCamelCase : Tuple = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__snake_case , __snake_case ) , """w""" , encoding="""utf-8""" ) as f: _lowerCamelCase : Union[str, Any] = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + """\n""" f.write(__snake_case ) return metadata, index if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. 
Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) UpperCAmelCase = parser.parse_args() UpperCAmelCase , UpperCAmelCase = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) UpperCAmelCase = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) UpperCAmelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
88
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
1
"""simple docstring""" import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( __snake_case : List[Any] , __snake_case : str , __snake_case : int , __snake_case : int ): """simple docstring""" _lowerCamelCase : int = BigBirdConfig.from_json_file(__snake_case ) print(F'Building PyTorch model from configuration: {config}' ) if is_trivia_qa: _lowerCamelCase : List[str] = BigBirdForQuestionAnswering(__snake_case ) else: _lowerCamelCase : Union[str, Any] = BigBirdForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(__snake_case , __snake_case , is_trivia_qa=__snake_case ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(__snake_case ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--big_bird_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head.""" ) UpperCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
88
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
1
"""simple docstring""" def _snake_case ( __snake_case : Any , __snake_case : Dict ): """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def _snake_case ( __snake_case : str , __snake_case : List[str]=0 ): """simple docstring""" return sorted(__snake_case , key=lambda __snake_case : x[column] ) def _snake_case ( __snake_case : List[Any] , __snake_case : Dict , __snake_case : Optional[Any]=float("""inf""" ) ): """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 , __snake_case ): _lowerCamelCase : str = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: _lowerCamelCase : Optional[Any] = current_dis return min_dis def _snake_case ( __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[str]=float("""inf""" ) ): """simple docstring""" for i in range(min(6 , points_counts - 1 ) , __snake_case ): for j in range(max(0 , i - 6 ) , __snake_case ): _lowerCamelCase : Optional[int] = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: _lowerCamelCase : str = current_dis return min_dis def _snake_case ( __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Any ): """simple docstring""" if points_counts <= 3: return dis_between_closest_pair(__snake_case , __snake_case ) # recursion _lowerCamelCase : Optional[int] = points_counts // 2 _lowerCamelCase : List[Any] = closest_pair_of_points_sqr( __snake_case , points_sorted_on_y[:mid] , __snake_case ) _lowerCamelCase : Any = closest_pair_of_points_sqr( __snake_case , points_sorted_on_y[mid:] , points_counts - mid ) _lowerCamelCase : str = min(__snake_case , __snake_case ) _lowerCamelCase : Optional[Any] = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(__snake_case ) _lowerCamelCase : List[str] = dis_between_closest_in_strip( __snake_case , len(__snake_case ) , __snake_case ) return min(__snake_case , __snake_case ) def 
_snake_case ( __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : Dict = column_based_sort(__snake_case , column=0 ) _lowerCamelCase : List[str] = column_based_sort(__snake_case , column=1 ) return ( closest_pair_of_points_sqr( __snake_case , __snake_case , __snake_case ) ) ** 0.5 if __name__ == "__main__": UpperCAmelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
88
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : int = """laion/clap-htsat-unfused""" _lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Any: return RobertaTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: shutil.rmtree(self.tmpdirname) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Dict = self.get_tokenizer() _lowerCamelCase : List[Any] = self.get_feature_extractor() _lowerCamelCase : str = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Union[str, Any] = ClapProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : List[Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : List[str] = 
self.get_feature_extractor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0) _lowerCamelCase : str = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : Any = self.get_tokenizer() _lowerCamelCase : Optional[Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = floats_list((3, 1000)) _lowerCamelCase : Tuple = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""") _lowerCamelCase : Any = processor(audios=SCREAMING_SNAKE_CASE , return_tensors="""np""") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : Union[str, Any] = self.get_feature_extractor() _lowerCamelCase : Optional[Any] = self.get_tokenizer() _lowerCamelCase : Tuple = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = """This is a test string""" _lowerCamelCase : Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[Any] = self.get_feature_extractor() _lowerCamelCase : Tuple = self.get_tokenizer() _lowerCamelCase : Any = 
ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : int = processor.batch_decode(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = self.get_feature_extractor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : str = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
88
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" from __future__ import annotations from collections import namedtuple def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : float ): """simple docstring""" _lowerCamelCase : Optional[Any] = namedtuple("""result""" , """name value""" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("""Only one argument must be 0""" ) elif power < 0: raise ValueError( """Power cannot be negative in any electrical/electronics system""" ) elif voltage == 0: return result("""voltage""" , power / current ) elif current == 0: return result("""current""" , power / voltage ) elif power == 0: return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
88
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( __snake_case : List[str] ): """simple docstring""" for param in module.parameters(): _lowerCamelCase : Optional[Any] = False def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : Any = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : int = plt.imshow(__snake_case ) fig.axes.get_xaxis().set_visible(__snake_case ) fig.axes.get_yaxis().set_visible(__snake_case ) plt.show() def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
88
1
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCAmelCase = """\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ UpperCAmelCase = """\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ UpperCAmelCase = """ Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). 
Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def _snake_case ( __snake_case : List[Any] , __snake_case : str ): """simple docstring""" return float((preds == labels).mean() ) def _snake_case ( __snake_case : Optional[int] , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = simple_accuracy(__snake_case , __snake_case ) _lowerCamelCase : Union[str, Any] = float(fa_score(y_true=__snake_case , y_pred=__snake_case ) ) return { "accuracy": acc, "f1": fa, } def _snake_case ( __snake_case : Optional[Any] , __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : Dict = np.array(__snake_case ) _lowerCamelCase : Optional[Any] = np.array(__snake_case ) _lowerCamelCase : List[Any] = en_sentvecs.shape[0] # mean centering _lowerCamelCase : str = en_sentvecs - np.mean(__snake_case , axis=0 ) _lowerCamelCase : Optional[int] = in_sentvecs - np.mean(__snake_case , axis=0 ) _lowerCamelCase : Union[str, Any] = cdist(__snake_case , 
__snake_case , """cosine""" ) _lowerCamelCase : str = np.array(range(__snake_case ) ) _lowerCamelCase : Optional[Any] = sim.argsort(axis=1 )[:, :10] _lowerCamelCase : Dict = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> Tuple: if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """ """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """ """\"wiki-ner\"]""") return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""") if self.config_name != """cvit-mkb-clsr""" else datasets.Sequence(datasets.Value("""float32""")), """references""": datasets.Value("""int64""") if self.config_name != """cvit-mkb-clsr""" else datasets.Sequence(datasets.Value("""float32""")), }) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any: if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)} elif self.config_name in ["wiki-ner"]: return acc_and_fa(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)} else: raise KeyError( """You should supply a configuration name selected in """ """[\"wnli\", \"copa\", 
\"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """ """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """ """\"wiki-ner\"]""")
88
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
1
"""simple docstring""" from ... import PretrainedConfig UpperCAmelCase = { """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""", } class lowercase__ ( A_ ): __UpperCAmelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP __UpperCAmelCase = '''nezha''' def __init__( self , SCREAMING_SNAKE_CASE=2_1128 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Tuple = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : List[Any] = num_attention_heads _lowerCamelCase : Any = hidden_act _lowerCamelCase : Dict = intermediate_size _lowerCamelCase : List[str] = hidden_dropout_prob _lowerCamelCase : int = attention_probs_dropout_prob _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Dict = max_relative_position _lowerCamelCase : Union[str, Any] = type_vocab_size _lowerCamelCase : int = initializer_range _lowerCamelCase : Tuple = layer_norm_eps _lowerCamelCase : Union[str, Any] = classifier_dropout _lowerCamelCase : str = use_cache
88
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" def _snake_case ( __snake_case : int ): """simple docstring""" return str(__snake_case ) == str(__snake_case )[::-1] def _snake_case ( __snake_case : int ): """simple docstring""" return int(__snake_case ) + int(str(__snake_case )[::-1] ) def _snake_case ( __snake_case : int = 10000 ): """simple docstring""" _lowerCamelCase : str = [] for num in range(1 , __snake_case ): _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Tuple = num while iterations < 50: _lowerCamelCase : str = sum_reverse(__snake_case ) iterations += 1 if is_palindrome(__snake_case ): break else: lychrel_nums.append(__snake_case ) return len(__snake_case ) if __name__ == "__main__": print(f'''{solution() = }''')
88
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = HfArgumentParser(__snake_case ) _lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] _lowerCamelCase : Dict = TensorFlowBenchmark(args=__snake_case ) try: _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: _lowerCamelCase : Union[str, Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" _lowerCamelCase : List[str] = """ """.join(str(__snake_case ).split(""" """ )[:-1] ) _lowerCamelCase : Dict = """""" _lowerCamelCase : List[Any] = eval(str(__snake_case ).split(""" """ )[-1] ) _lowerCamelCase : Tuple = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__snake_case ) if len(__snake_case ) > 0: _lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(__snake_case ) raise ValueError(__snake_case ) benchmark.run() if __name__ == "__main__": main()
88
1
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = DDIMPipeline __UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCAmelCase = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''latents''', '''callback''', '''callback_steps''', } __UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Optional[int]: torch.manual_seed(0) _lowerCamelCase : str = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) _lowerCamelCase : List[Any] = DDIMScheduler() _lowerCamelCase : List[str] = {"""unet""": unet, """scheduler""": scheduler} return components def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> str: if str(SCREAMING_SNAKE_CASE).startswith("""mps"""): _lowerCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = { """batch_size""": 1, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Optional[Any] = """cpu""" _lowerCamelCase : Any = self.get_dummy_components() _lowerCamelCase : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE) pipe.to(SCREAMING_SNAKE_CASE) 
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = self.get_dummy_inputs(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = pipe(**SCREAMING_SNAKE_CASE).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3)) _lowerCamelCase : int = np.array( [1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4]) _lowerCamelCase : List[str] = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(SCREAMING_SNAKE_CASE , 1e-3) def UpperCamelCase_ ( self) -> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def UpperCamelCase_ ( self) -> Union[str, Any]: super().test_save_load_local(expected_max_difference=3e-3) def UpperCamelCase_ ( self) -> Union[str, Any]: super().test_save_load_optional_components(expected_max_difference=3e-3) def UpperCamelCase_ ( self) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : Any = """google/ddpm-cifar10-32""" _lowerCamelCase : int = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = DDIMScheduler() _lowerCamelCase : List[str] = DDIMPipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE) ddim.to(SCREAMING_SNAKE_CASE) ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = torch.manual_seed(0) _lowerCamelCase : Dict = ddim(generator=SCREAMING_SNAKE_CASE , eta=0.0 , output_type="""numpy""").images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCamelCase : int = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def UpperCamelCase_ ( self) -> str: _lowerCamelCase : 
Union[str, Any] = """google/ddpm-ema-bedroom-256""" _lowerCamelCase : Tuple = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = DDIMPipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE) ddpm.to(SCREAMING_SNAKE_CASE) ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = torch.manual_seed(0) _lowerCamelCase : int = ddpm(generator=SCREAMING_SNAKE_CASE , output_type="""numpy""").images _lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCamelCase : str = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
88
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class lowercase__ ( A_ ): __UpperCAmelCase = '''ibert''' def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="none" , **SCREAMING_SNAKE_CASE , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : List[Any] = position_embedding_type 
_lowerCamelCase : Any = quant_mode _lowerCamelCase : List[str] = force_dequant class lowercase__ ( A_ ): @property def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : Optional[int] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
88
1
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase__ ( nn.Module ): __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 0.0 __UpperCAmelCase = 1 __UpperCAmelCase = 1 __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = jnp.floataa def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = [] _lowerCamelCase : List[Any] = [] for i in range(self.num_layers): _lowerCamelCase : Dict = self.in_channels if i == 0 else self.out_channels _lowerCamelCase : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = resnets _lowerCamelCase : Tuple = attentions if self.add_downsample: _lowerCamelCase : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True) -> Optional[int]: _lowerCamelCase : Tuple = () for resnet, attn in zip(self.resnets , self.attentions): _lowerCamelCase : List[str] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) output_states += (hidden_states,) if self.add_downsample: 
_lowerCamelCase : int = self.downsamplers_a(SCREAMING_SNAKE_CASE) output_states += (hidden_states,) return hidden_states, output_states class lowercase__ ( nn.Module ): __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 0.0 __UpperCAmelCase = 1 __UpperCAmelCase = True __UpperCAmelCase = jnp.floataa def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[int] = [] for i in range(self.num_layers): _lowerCamelCase : Tuple = self.in_channels if i == 0 else self.out_channels _lowerCamelCase : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = resnets if self.add_downsample: _lowerCamelCase : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True) -> Tuple: _lowerCamelCase : Optional[int] = () for resnet in self.resnets: _lowerCamelCase : Dict = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) output_states += (hidden_states,) if self.add_downsample: _lowerCamelCase : List[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE) output_states += (hidden_states,) return hidden_states, output_states class lowercase__ ( nn.Module ): __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 0.0 __UpperCAmelCase = 1 __UpperCAmelCase = 1 __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = jnp.floataa def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = [] _lowerCamelCase : List[str] = [] for i in range(self.num_layers): _lowerCamelCase : Union[str, Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels _lowerCamelCase : Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels _lowerCamelCase : 
Optional[int] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = resnets _lowerCamelCase : Optional[Any] = attentions if self.add_upsample: _lowerCamelCase : List[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True) -> int: for resnet, attn in zip(self.resnets , self.attentions): # pop res hidden states _lowerCamelCase : Any = res_hidden_states_tuple[-1] _lowerCamelCase : Tuple = res_hidden_states_tuple[:-1] _lowerCamelCase : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1) _lowerCamelCase : str = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) if self.add_upsample: _lowerCamelCase : Any = self.upsamplers_a(SCREAMING_SNAKE_CASE) return hidden_states class lowercase__ ( nn.Module ): __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 0.0 __UpperCAmelCase = 1 __UpperCAmelCase = True __UpperCAmelCase = jnp.floataa def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : str = [] for i in range(self.num_layers): _lowerCamelCase : Union[str, Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels _lowerCamelCase : 
Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels _lowerCamelCase : Dict = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = resnets if self.add_upsample: _lowerCamelCase : Dict = FlaxUpsampleaD(self.out_channels , dtype=self.dtype) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True) -> int: for resnet in self.resnets: # pop res hidden states _lowerCamelCase : Union[str, Any] = res_hidden_states_tuple[-1] _lowerCamelCase : str = res_hidden_states_tuple[:-1] _lowerCamelCase : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1) _lowerCamelCase : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) if self.add_upsample: _lowerCamelCase : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE) return hidden_states class lowercase__ ( nn.Module ): __UpperCAmelCase = 42 __UpperCAmelCase = 0.0 __UpperCAmelCase = 1 __UpperCAmelCase = 1 __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = jnp.floataa def UpperCamelCase_ ( self) -> Optional[int]: # there is always at least one resnet _lowerCamelCase : Optional[int] = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _lowerCamelCase : str = [] for _ in range(self.num_layers): _lowerCamelCase : Dict = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = FlaxResnetBlockaD( in_channels=self.in_channels , 
out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = resnets _lowerCamelCase : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : Any = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for attn, resnet in zip(self.attentions , self.resnets[1:]): _lowerCamelCase : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE) return hidden_states
88
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
1
"""simple docstring""" from math import pi, sqrt def _snake_case ( __snake_case : float ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 171.5: raise OverflowError("""math range error""" ) elif num - int(__snake_case ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(__snake_case ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _snake_case ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(__snake_case ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = 1.0 while num: UpperCAmelCase = float(input("""Gamma of: """)) print(f'''gamma({num}) = {gamma(num)}''') print("""\nEnter 0 to exit...""")
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
1
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE="divided_space_time" , SCREAMING_SNAKE_CASE=None , ) -> int: _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : List[Any] = image_size _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : Optional[int] = patch_size _lowerCamelCase : Dict = num_frames _lowerCamelCase : Union[str, Any] = is_training _lowerCamelCase : Dict = use_labels _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : 
List[Any] = intermediate_size _lowerCamelCase : Optional[Any] = hidden_act _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Dict = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = attention_type _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : Union[str, Any] = scope _lowerCamelCase : int = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token _lowerCamelCase : str = (image_size // patch_size) ** 2 _lowerCamelCase : Optional[int] = (num_frames) * self.num_patches_per_frame + 1 def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : Any = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels) _lowerCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def UpperCamelCase_ ( self) -> int: _lowerCamelCase : List[str] = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) _lowerCamelCase : Optional[Any] = self.num_labels return config def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Tuple: _lowerCamelCase : Dict = TimesformerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape 
, (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : str = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : str = model(SCREAMING_SNAKE_CASE) # verify the logits shape _lowerCamelCase : Dict = torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : int = self.prepare_config_and_inputs() _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs _lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : Any = TimesformerModelTester(self) _lowerCamelCase : List[Any] = ConfigTester( self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> str: _lowerCamelCase : List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE): _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE) return inputs_dict def UpperCamelCase_ ( self) -> str: self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""") def 
UpperCamelCase_ ( self) -> Tuple: pass def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _lowerCamelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Tuple = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> str: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : str = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> int: if not self.has_attentions: pass else: _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Tuple = True for model_class in self.all_model_classes: _lowerCamelCase : int = self.model_tester.seq_length _lowerCamelCase : Optional[int] = 
self.model_tester.num_frames _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = False _lowerCamelCase : List[Any] = True _lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): _lowerCamelCase : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) _lowerCamelCase : int = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowerCamelCase : List[str] = True _lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): _lowerCamelCase : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) _lowerCamelCase : str = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) _lowerCamelCase : str = len(SCREAMING_SNAKE_CASE) # Check attention is always last and order is fine _lowerCamelCase : Tuple = True _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): _lowerCamelCase : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE)) _lowerCamelCase : Optional[Any] = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( 
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def UpperCamelCase_ ( self) -> str: def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = outputs.hidden_states _lowerCamelCase : Union[str, Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) _lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[str] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : str = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def _snake_case ( ): """simple docstring""" _lowerCamelCase : List[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) _lowerCamelCase : Optional[Any] = np.load(__snake_case ) return list(__snake_case ) @require_torch @require_vision class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> str: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) 
@slow def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : Union[str, Any] = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""").to( SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = self.default_image_processor _lowerCamelCase : Dict = prepare_video() _lowerCamelCase : int = image_processor(video[:8] , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) # forward pass with torch.no_grad(): _lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE) # verify the logits _lowerCamelCase : Tuple = torch.Size((1, 400)) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
88
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
1
"""simple docstring""" import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home UpperCAmelCase = HUGGINGFACE_HUB_CACHE UpperCAmelCase = """config.json""" UpperCAmelCase = """diffusion_pytorch_model.bin""" UpperCAmelCase = """diffusion_flax_model.msgpack""" UpperCAmelCase = """model.onnx""" UpperCAmelCase = """diffusion_pytorch_model.safetensors""" UpperCAmelCase = """weights.pb""" UpperCAmelCase = """https://huggingface.co""" UpperCAmelCase = default_cache_path UpperCAmelCase = """diffusers_modules""" UpperCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules""")) UpperCAmelCase = ["""fp16""", """non-ema"""] UpperCAmelCase = """.self_attn"""
88
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
1
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _snake_case ( __snake_case : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = 384 _lowerCamelCase : Dict = 7 if "tiny" in model_name: _lowerCamelCase : str = 96 _lowerCamelCase : Union[str, Any] = (2, 2, 6, 2) _lowerCamelCase : Union[str, Any] = (3, 6, 12, 24) elif "small" in model_name: _lowerCamelCase : Union[str, Any] = 96 _lowerCamelCase : Union[str, Any] = (2, 2, 18, 2) _lowerCamelCase : Any = (3, 6, 12, 24) elif "base" in model_name: _lowerCamelCase : List[str] = 128 _lowerCamelCase : Tuple = (2, 2, 18, 2) _lowerCamelCase : int = (4, 8, 16, 32) _lowerCamelCase : int = 12 _lowerCamelCase : List[Any] = 512 elif "large" in model_name: _lowerCamelCase : str = 192 _lowerCamelCase : Any = (2, 2, 18, 2) _lowerCamelCase : Any = (6, 12, 24, 48) _lowerCamelCase : Tuple = 12 _lowerCamelCase : List[Any] = 768 # set label information _lowerCamelCase : int = 150 _lowerCamelCase : Any = """huggingface/label-files""" _lowerCamelCase : str = """ade20k-id2label.json""" _lowerCamelCase : Tuple = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Any = {int(__snake_case ): v for k, v in idalabel.items()} _lowerCamelCase : Any = {v: k for k, v in idalabel.items()} _lowerCamelCase : int = SwinConfig( embed_dim=__snake_case , depths=__snake_case , num_heads=__snake_case , window_size=__snake_case , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) _lowerCamelCase : Optional[int] = UperNetConfig( backbone_config=__snake_case , auxiliary_in_channels=__snake_case , num_labels=__snake_case , idalabel=__snake_case , labelaid=__snake_case , ) return config def _snake_case ( __snake_case : Any ): """simple docstring""" 
_lowerCamelCase : int = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) 
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.stages.{i}.downsample.reduction.weight', F'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.stages.{i}.downsample.norm.weight', F'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.stages.{i}.downsample.norm.bias', F'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def _snake_case ( __snake_case : str , __snake_case : Any , __snake_case : str ): """simple docstring""" _lowerCamelCase : List[Any] = dct.pop(__snake_case ) _lowerCamelCase : int = val def _snake_case ( __snake_case : int , __snake_case : Dict ): """simple docstring""" _lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCamelCase : Union[str, Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is 
a single matrix + bias) _lowerCamelCase : Optional[Any] = state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) _lowerCamelCase : str = state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Union[str, Any] = in_proj_weight[:dim, :] _lowerCamelCase : Union[str, Any] = in_proj_bias[: dim] _lowerCamelCase : List[Any] = in_proj_weight[ dim : dim * 2, : ] _lowerCamelCase : Union[str, Any] = in_proj_bias[ dim : dim * 2 ] _lowerCamelCase : int = in_proj_weight[ -dim :, : ] _lowerCamelCase : Tuple = in_proj_bias[-dim :] # fmt: on def _snake_case ( __snake_case : str ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : int = x.shape _lowerCamelCase : Union[str, Any] = x.reshape(__snake_case , 4 , in_channel // 4 ) _lowerCamelCase : List[str] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__snake_case , __snake_case ) return x def _snake_case ( __snake_case : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = x.shape _lowerCamelCase : Optional[Any] = x.reshape(__snake_case , in_channel // 4 , 4 ) _lowerCamelCase : int = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__snake_case , __snake_case ) return x def _snake_case ( __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : Any = x.shape[0] _lowerCamelCase : int = x.reshape(4 , in_channel // 4 ) _lowerCamelCase : int = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__snake_case ) return x def _snake_case ( __snake_case : Dict ): """simple docstring""" _lowerCamelCase : Tuple = x.shape[0] _lowerCamelCase : int = x.reshape(in_channel // 4 , 4 ) _lowerCamelCase : Dict = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__snake_case ) return x def _snake_case ( __snake_case : Tuple , __snake_case : str , __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : str = { """upernet-swin-tiny""": 
"""https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""", """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""", """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""", """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""", } _lowerCamelCase : Optional[int] = model_name_to_url[model_name] _lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(__snake_case , map_location="""cpu""" , file_name=__snake_case )[ """state_dict""" ] for name, param in state_dict.items(): print(__snake_case , param.shape ) _lowerCamelCase : str = get_upernet_config(__snake_case ) _lowerCamelCase : List[str] = UperNetForSemanticSegmentation(__snake_case ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCamelCase : Union[str, Any] = state_dict.pop(__snake_case ) if "bn" in key: _lowerCamelCase : List[Any] = key.replace("""bn""" , """batch_norm""" ) _lowerCamelCase : List[Any] = val # rename keys _lowerCamelCase : Tuple = create_rename_keys(__snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) read_in_q_k_v(__snake_case , config.backbone_config ) # fix downsample parameters for key, value in 
state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCamelCase : Optional[int] = reverse_correct_unfold_reduction_order(__snake_case ) if "norm" in key: _lowerCamelCase : Optional[int] = reverse_correct_unfold_norm_order(__snake_case ) model.load_state_dict(__snake_case ) # verify on image _lowerCamelCase : Dict = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" _lowerCamelCase : str = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("""RGB""" ) _lowerCamelCase : Optional[int] = SegformerImageProcessor() _lowerCamelCase : Any = processor(__snake_case , return_tensors="""pt""" ).pixel_values with torch.no_grad(): _lowerCamelCase : List[str] = model(__snake_case ) _lowerCamelCase : Dict = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCamelCase : Tuple = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": _lowerCamelCase : List[Any] = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": _lowerCamelCase : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": _lowerCamelCase : Optional[int] = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __snake_case , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__snake_case ) print(F'Saving processor to {pytorch_dump_folder_path}' ) 
processor.save_pretrained(__snake_case ) if push_to_hub: print(F'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(F'openmmlab/{model_name}' ) processor.push_to_hub(F'openmmlab/{model_name}' ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
88
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]: _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : Any = seq_length _lowerCamelCase : List[str] = is_training _lowerCamelCase : List[str] = use_token_type_ids _lowerCamelCase : List[Any] = use_labels _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : List[Any] = num_attention_heads _lowerCamelCase : str = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : 
Optional[Any] = initializer_range _lowerCamelCase : List[str] = num_labels _lowerCamelCase : Tuple = num_choices _lowerCamelCase : Optional[int] = scope _lowerCamelCase : Any = self.vocab_size - 1 def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : int = None if self.use_token_type_ids: _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices) _lowerCamelCase : str = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _lowerCamelCase : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : List[Any] = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase_ ( self 
, SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> Any: _lowerCamelCase : Optional[Any] = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : Any = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : Any = self.num_labels _lowerCamelCase : List[Any] = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Any = config_and_inputs _lowerCamelCase : Any = { 
"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class lowercase__ ( A_ ,A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __UpperCAmelCase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __UpperCAmelCase = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Union[str, Any]: _lowerCamelCase : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _lowerCamelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Optional[int] = inputs_dict["""labels"""] _lowerCamelCase : Dict = inputs_dict["""labels"""] _lowerCamelCase : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , ) _lowerCamelCase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE) return inputs_dict def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[int] = OpenAIGPTModelTester(self) _lowerCamelCase : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> str: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @require_torch class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""") model.to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE) # the president is _lowerCamelCase : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _lowerCamelCase : Optional[Any] = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE)
88
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
1
"""simple docstring""" from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase = TypeVar("""T""") def _snake_case ( __snake_case : int ): """simple docstring""" return (position - 1) // 2 def _snake_case ( __snake_case : int ): """simple docstring""" return (2 * position) + 1 def _snake_case ( __snake_case : int ): """simple docstring""" return (2 * position) + 2 class lowercase__ ( Generic[T] ): def __init__( self) -> None: _lowerCamelCase : list[tuple[T, int]] = [] _lowerCamelCase : dict[T, int] = {} _lowerCamelCase : int = 0 def __len__( self) -> int: return self.elements def __repr__( self) -> str: return str(self.heap) def UpperCamelCase_ ( self) -> bool: # Check if the priority queue is empty return self.elements == 0 def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight)) _lowerCamelCase : Any = self.elements self.elements += 1 self._bubble_up(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1) _lowerCamelCase , _lowerCamelCase : Dict = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _lowerCamelCase , _lowerCamelCase : Tuple = self.heap[0] self._bubble_down(SCREAMING_SNAKE_CASE) return elem def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: # Update the weight of the given key _lowerCamelCase : List[Any] = self.position_map[elem] _lowerCamelCase : Tuple = (elem, weight) if position > 0: _lowerCamelCase : List[str] = get_parent_position(SCREAMING_SNAKE_CASE) _lowerCamelCase , _lowerCamelCase : int = self.heap[parent_position] if parent_weight > weight: self._bubble_up(SCREAMING_SNAKE_CASE) else: self._bubble_down(SCREAMING_SNAKE_CASE) else: self._bubble_down(SCREAMING_SNAKE_CASE) def 
UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _lowerCamelCase : int = self.position_map[elem] if curr_pos == 0: return None _lowerCamelCase : List[Any] = get_parent_position(SCREAMING_SNAKE_CASE) _lowerCamelCase , _lowerCamelCase : Dict = self.heap[curr_pos] _lowerCamelCase , _lowerCamelCase : List[Any] = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) return self._bubble_up(SCREAMING_SNAKE_CASE) return None def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _lowerCamelCase : int = self.position_map[elem] _lowerCamelCase , _lowerCamelCase : Dict = self.heap[curr_pos] _lowerCamelCase : str = get_child_left_position(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = get_child_right_position(SCREAMING_SNAKE_CASE) if child_left_position < self.elements and child_right_position < self.elements: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.heap[child_left_position] _lowerCamelCase , _lowerCamelCase : Dict = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) return self._bubble_down(SCREAMING_SNAKE_CASE) if child_left_position < self.elements: _lowerCamelCase , _lowerCamelCase : Dict = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) return self._bubble_down(SCREAMING_SNAKE_CASE) else: return None if child_right_position < self.elements: _lowerCamelCase , _lowerCamelCase : int = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) return self._bubble_down(SCREAMING_SNAKE_CASE) return None def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE) -> None: # Swap the nodes at the given positions _lowerCamelCase : List[str] = self.heap[nodea_pos][0] _lowerCamelCase : Dict = self.heap[nodea_pos][0] _lowerCamelCase , _lowerCamelCase : List[Any] = ( self.heap[nodea_pos], self.heap[nodea_pos], ) _lowerCamelCase : Optional[Any] = nodea_pos _lowerCamelCase : int = nodea_pos class lowercase__ ( Generic[T] ): def __init__( self) -> None: _lowerCamelCase : dict[T, dict[T, int]] = {} _lowerCamelCase : int = 0 def __repr__( self) -> str: return str(self.connections) def __len__( self) -> int: return self.nodes def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _lowerCamelCase : int = {} self.nodes += 1 def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: # Add an edge between 2 nodes in the graph self.add_node(SCREAMING_SNAKE_CASE) self.add_node(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = weight _lowerCamelCase : int = weight def _snake_case ( __snake_case : GraphUndirectedWeighted[T] , ): """simple docstring""" _lowerCamelCase : dict[T, int] = {node: maxsize for node in graph.connections} _lowerCamelCase : dict[T, T | None] = {node: None for node in graph.connections} _lowerCamelCase : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__snake_case , __snake_case ) if priority_queue.is_empty(): return dist, parent # initialization _lowerCamelCase : List[str] = priority_queue.extract_min() _lowerCamelCase : str = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _lowerCamelCase : List[Any] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__snake_case , dist[neighbour] ) _lowerCamelCase : Tuple = node # running prim's algorithm while not priority_queue.is_empty(): _lowerCamelCase : List[Any] = 
priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _lowerCamelCase : Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__snake_case , dist[neighbour] ) _lowerCamelCase : List[str] = node return dist, parent
88
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class lowercase__ ( A_ ): __UpperCAmelCase = '''big_bird''' def __init__( self , SCREAMING_SNAKE_CASE=5_0358 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=66 , SCREAMING_SNAKE_CASE="block_sparse" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , sep_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Dict = max_position_embeddings _lowerCamelCase : int = hidden_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : int = intermediate_size _lowerCamelCase : int = hidden_act _lowerCamelCase : Union[str, Any] = 
hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : str = initializer_range _lowerCamelCase : Optional[int] = type_vocab_size _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : Optional[Any] = use_cache _lowerCamelCase : Any = rescale_embeddings _lowerCamelCase : Optional[int] = attention_type _lowerCamelCase : Optional[int] = use_bias _lowerCamelCase : List[Any] = block_size _lowerCamelCase : str = num_random_blocks _lowerCamelCase : Any = classifier_dropout class lowercase__ ( A_ ): @property def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCamelCase : str = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : Optional[Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
88
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
1
"""simple docstring""" UpperCAmelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
88
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1) UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class lowercase__ : __UpperCAmelCase = 42 __UpperCAmelCase = 42 class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : Node | None = None for i in sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE): _lowerCamelCase : Optional[int] = Node(SCREAMING_SNAKE_CASE , self.head) def __iter__( self) -> Iterator[int]: _lowerCamelCase : Any = self.head while node: yield node.data _lowerCamelCase : Optional[int] = node.next_node def __len__( self) -> int: return sum(1 for _ in self) def __str__( self) -> str: return " -> ".join([str(SCREAMING_SNAKE_CASE) for node in self]) def _snake_case ( __snake_case : SortedLinkedList , __snake_case : SortedLinkedList ): """simple docstring""" return SortedLinkedList(list(__snake_case ) + list(__snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
88
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = StableDiffusionSAGPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Optional[Any]: torch.manual_seed(0) _lowerCamelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _lowerCamelCase : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) _lowerCamelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0) _lowerCamelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
_lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") _lowerCamelCase : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]: if str(SCREAMING_SNAKE_CASE).startswith("""mps"""): _lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""") _lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = """.""" _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Dict = output.images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 
0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = """.""" _lowerCamelCase : List[str] = torch.manual_seed(0) _lowerCamelCase : int = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Any = output.images _lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = """.""" _lowerCamelCase : Union[str, Any] = torch.manual_seed(0) _lowerCamelCase : Optional[int] = sag_pipe( [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images assert image.shape == (1, 512, 768, 3)
88
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
1
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) UpperCAmelCase = logging.getLogger(__name__) UpperCAmelCase = tf.data.AUTOTUNE def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=__snake_case , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=__snake_case , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=__snake_case , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=__snake_case , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=__snake_case , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=__snake_case , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=__snake_case , help="""Path to training dataset to load. 
If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=__snake_case , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=__snake_case , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=__snake_case , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=__snake_case , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=__snake_case , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=__snake_case , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=__snake_case , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=__snake_case , required=__snake_case , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=__snake_case , help="""Model ID to upload to on the Hugging Face Hub.""" ) _lowerCamelCase : Any = parser.parse_args() return args def _snake_case ( __snake_case : str ): """simple docstring""" try: if args.tpu_name: _lowerCamelCase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _lowerCamelCase : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. 
When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(__snake_case ) tf.tpu.experimental.initialize_tpu_system(__snake_case ) return tpu def _snake_case ( __snake_case : List[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = 0 for file in file_list: _lowerCamelCase : str = file.split("""/""" )[-1] _lowerCamelCase : str = re.search(R"""-\d+-(\d+)\.tfrecord""" , __snake_case ).group(1 ) _lowerCamelCase : Dict = int(__snake_case ) num_samples += sample_count return num_samples def _snake_case ( __snake_case : int , __snake_case : List[str] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any]=None ): """simple docstring""" _lowerCamelCase : Dict = count_samples(__snake_case ) _lowerCamelCase : Any = tf.data.Dataset.from_tensor_slices(__snake_case ) if shuffle: _lowerCamelCase : str = dataset.shuffle(len(__snake_case ) ) _lowerCamelCase : Tuple = tf.data.TFRecordDataset(__snake_case , num_parallel_reads=__snake_case ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _lowerCamelCase : List[Any] = dataset.apply(tf.data.experimental.assert_cardinality(__snake_case ) ) _lowerCamelCase : Union[str, Any] = dataset.map(__snake_case , num_parallel_calls=__snake_case ) if shuffle: assert shuffle_buffer_size is not None _lowerCamelCase : Any = dataset.shuffle(args.shuffle_buffer_size ) _lowerCamelCase : Any = dataset.batch(__snake_case , drop_remainder=__snake_case ) _lowerCamelCase : List[Any] = dataset.map(__snake_case , num_parallel_calls=__snake_case ) _lowerCamelCase : Tuple = dataset.prefetch(__snake_case ) return dataset def _snake_case ( __snake_case : int ): """simple docstring""" if not args.no_tpu: _lowerCamelCase : Union[str, Any] = initialize_tpu(__snake_case ) _lowerCamelCase : str = tf.distribute.TPUStrategy(__snake_case ) else: _lowerCamelCase : Tuple = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if 
args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer ) _lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(args.pretrained_model_config ) _lowerCamelCase : Any = tokenizer.vocab_size _lowerCamelCase : Optional[Any] = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' ) _lowerCamelCase : Any = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' ) _lowerCamelCase : Dict = count_samples(__snake_case ) _lowerCamelCase : str = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _lowerCamelCase : int = steps_per_epoch * args.num_epochs with strategy.scope(): _lowerCamelCase : Any = TFAutoModelForMaskedLM.from_config(__snake_case ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _lowerCamelCase , _lowerCamelCase : Dict = create_optimizer( num_train_steps=__snake_case , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=__snake_case , metrics=["""accuracy"""] ) def decode_fn(__snake_case : Optional[int] ): _lowerCamelCase : str = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(__snake_case , __snake_case ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _lowerCamelCase : Tuple = DataCollatorForLanguageModeling( tokenizer=__snake_case , mlm_probability=args.mlm_probability , mlm=__snake_case , return_tensors="""tf""" ) def mask_with_collator(__snake_case : int ): # TF really needs an isin() function _lowerCamelCase : Optional[Any] = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) _lowerCamelCase , _lowerCamelCase : Optional[int] = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(__snake_case ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__snake_case , ) return batch _lowerCamelCase : Optional[int] = args.per_replica_batch_size * strategy.num_replicas_in_sync _lowerCamelCase : Tuple = prepare_dataset( __snake_case , decode_fn=__snake_case , mask_fn=__snake_case , batch_size=__snake_case , shuffle=__snake_case , shuffle_buffer_size=args.shuffle_buffer_size , ) _lowerCamelCase : int = prepare_dataset( __snake_case , decode_fn=__snake_case , mask_fn=__snake_case , batch_size=__snake_case , shuffle=__snake_case , ) _lowerCamelCase : Dict = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__snake_case ) ) model.fit( __snake_case , validation_data=__snake_case , epochs=args.num_epochs , callbacks=__snake_case , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": 
UpperCAmelCase = parse_args() main(args)
88
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
1
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowercase__ ( A_ ): def __lt__( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]: return self[-1] < other[-1] def __eq__( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]: return self[-1] == other[-1] def _snake_case ( __snake_case : list ): """simple docstring""" _lowerCamelCase : list[Stack] = [] # sort into stacks for element in collection: _lowerCamelCase : Optional[int] = Stack([element] ) _lowerCamelCase : Optional[Any] = bisect_left(__snake_case , __snake_case ) if i != len(__snake_case ): stacks[i].append(__snake_case ) else: stacks.append(__snake_case ) # use a heap-based merge to merge stack efficiently _lowerCamelCase : Union[str, Any] = merge(*(reversed(__snake_case ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase = [int(item) for item in user_input.split(""",""")] print(patience_sort(unsorted))
88
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
1
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping UpperCAmelCase = tuple[int, int] class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : set[int] = vertices _lowerCamelCase : dict[EdgeT, int] = { (min(SCREAMING_SNAKE_CASE), max(SCREAMING_SNAKE_CASE)): weight for edge, weight in edges.items() } def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: self.vertices.add(edge[0]) self.vertices.add(edge[1]) _lowerCamelCase : Optional[int] = weight def UpperCamelCase_ ( self) -> Graph: _lowerCamelCase : Graph = Graph({min(self.vertices)} , {}) _lowerCamelCase : EdgeT _lowerCamelCase : int _lowerCamelCase : EdgeT _lowerCamelCase : int while len(subgraph.vertices) < len(self.vertices): _lowerCamelCase : List[str] = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _lowerCamelCase : int = edge _lowerCamelCase : List[Any] = weight subgraph.add_edge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) return subgraph def _snake_case ( __snake_case : str = "p107_network.txt" ): """simple docstring""" _lowerCamelCase : str = os.path.abspath(os.path.dirname(__snake_case ) ) _lowerCamelCase : str = os.path.join(__snake_case , __snake_case ) _lowerCamelCase : dict[EdgeT, int] = {} _lowerCamelCase : list[str] _lowerCamelCase : int _lowerCamelCase : int with open(__snake_case ) as f: _lowerCamelCase : Optional[Any] = f.read().strip().split("""\n""" ) _lowerCamelCase : List[str] = [line.split(""",""" ) for line in data] for edgea in range(1 , len(__snake_case ) ): for edgea in range(__snake_case ): if adjaceny_matrix[edgea][edgea] != "-": _lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] ) _lowerCamelCase : Graph = Graph(set(range(len(__snake_case ) ) ) , __snake_case ) _lowerCamelCase : Graph = graph.prims_algorithm() _lowerCamelCase : 
int = sum(graph.edges.values() ) _lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
88
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" import os import sys UpperCAmelCase = os.path.join(os.path.dirname(__file__), """src""") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) UpperCAmelCase = [ """torch""", """numpy""", """tokenizers""", """filelock""", """requests""", """tqdm""", """regex""", """sentencepiece""", """sacremoses""", """importlib_metadata""", """huggingface_hub""", ] @add_start_docstrings(AutoConfig.__doc__ ) def _snake_case ( *__snake_case : Dict , **__snake_case : Union[str, Any] ): """simple docstring""" return AutoConfig.from_pretrained(*__snake_case , **__snake_case ) @add_start_docstrings(AutoTokenizer.__doc__ ) def _snake_case ( *__snake_case : str , **__snake_case : Any ): """simple docstring""" return AutoTokenizer.from_pretrained(*__snake_case , **__snake_case ) @add_start_docstrings(AutoModel.__doc__ ) def _snake_case ( *__snake_case : Optional[Any] , **__snake_case : Dict ): """simple docstring""" return AutoModel.from_pretrained(*__snake_case , **__snake_case ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def _snake_case ( *__snake_case : int , **__snake_case : Dict ): """simple docstring""" return AutoModelForCausalLM.from_pretrained(*__snake_case , **__snake_case ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def _snake_case ( *__snake_case : int , **__snake_case : List[str] ): """simple docstring""" return AutoModelForMaskedLM.from_pretrained(*__snake_case , **__snake_case ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def _snake_case ( *__snake_case : int , **__snake_case : int ): """simple docstring""" return AutoModelForSequenceClassification.from_pretrained(*__snake_case , **__snake_case ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def _snake_case ( *__snake_case : Dict , **__snake_case : List[Any] ): """simple 
docstring""" return AutoModelForQuestionAnswering.from_pretrained(*__snake_case , **__snake_case )
88
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = """▁""" UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase = { """facebook/xglm-564M""": 2048, } class lowercase__ ( A_ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: _lowerCamelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer _lowerCamelCase : List[str] = 7 _lowerCamelCase : List[str] = [F'<madeupword{i}>' for i in range(self.num_madeup_words)] _lowerCamelCase : Union[str, Any] = kwargs.get("""additional_special_tokens""" , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(SCREAMING_SNAKE_CASE)) _lowerCamelCase : Union[str, Any] = vocab_file # Original 
fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _lowerCamelCase : List[Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token _lowerCamelCase : List[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} _lowerCamelCase : str = len(self.sp_model) _lowerCamelCase : List[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self) -> Optional[Any]: _lowerCamelCase : str = self.__dict__.copy() _lowerCamelCase : Any = None _lowerCamelCase : List[str] = self.sp_model.serialized_model_proto() return state def __setstate__( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : Optional[int] = {} _lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: if token_ids_a is None: return [self.sep_token_id] + token_ids_a _lowerCamelCase : List[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , 
token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def UpperCamelCase_ ( self) -> List[str]: return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowerCamelCase : List[Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[Any]: _lowerCamelCase : Any = """""".join(SCREAMING_SNAKE_CASE).replace(SCREAMING_SNAKE_CASE , """ """).strip() return out_string def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return _lowerCamelCase : Tuple = os.path.join( 
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE , """wb""") as fi: _lowerCamelCase : List[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE) return (out_vocab_file,)
88
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( __snake_case : List[str] ): """simple docstring""" for param in module.parameters(): _lowerCamelCase : Optional[Any] = False def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : Any = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : int = plt.imshow(__snake_case ) fig.axes.get_xaxis().set_visible(__snake_case ) fig.axes.get_yaxis().set_visible(__snake_case ) plt.show() def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
88
1
"""Image processor: shortest-edge resize, center crop, rescale and
ImageNet-standard normalization, producing ``pixel_values``."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


# NOTE(review): the class name looks machine-mangled; kept as-is to avoid
# breaking external references.
class lowerCamelCase_(BaseImageProcessor):
    """Standard eval-style preprocessing: resize shortest edge to 256,
    center-crop to 224x224, rescale by 1/255 and normalize with the
    ImageNet-standard mean/std (all steps individually toggleable)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``,
        preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop the image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize the image channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline on one image or a batch of images and
        return a ``BatchFeature`` with ``pixel_values``."""
        # Per-call overrides fall back to the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __snake_case = logging.get_logger(__name__) __snake_case = { '''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''', } # fmt: off __snake_case = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] __snake_case = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class __lowerCamelCase (_a ): _lowercase = """whisper""" _lowercase = ["""past_key_values"""] _lowercase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self: Union[str, Any],A_: List[str]=5_1865,A_: Tuple=80,A_: List[Any]=6,A_: Dict=4,A_: Dict=6,A_: List[str]=4,A_: List[str]=1536,A_: int=1536,A_: List[str]=0.0,A_: Any=0.0,A_: List[str]=5_0257,A_: Tuple=True,A_: Dict=True,A_: Optional[Any]="gelu",A_: Tuple=256,A_: Dict=0.0,A_: List[Any]=0.0,A_: Dict=0.0,A_: int=0.0_2,A_: List[Any]=False,A_: List[str]=1500,A_: int=448,A_: Dict=5_0256,A_: Dict=5_0256,A_: List[str]=5_0256,A_: Dict=None,A_: List[Any]=[220, 5_0256],A_: Dict=False,A_: str=256,A_: Tuple=False,A_: List[Any]=0.0_5,A_: Dict=10,A_: Optional[int]=2,A_: List[str]=0.0,A_: Optional[Any]=10,A_: Union[str, Any]=0,A_: Dict=7,**A_: List[Any],): '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = num_mel_bins __UpperCamelCase = d_model __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = use_cache __UpperCamelCase = encoder_layers __UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCamelCase = max_source_positions __UpperCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. 
__UpperCamelCase = classifier_proj_size __UpperCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCamelCase = apply_spec_augment __UpperCamelCase = mask_time_prob __UpperCamelCase = mask_time_length __UpperCamelCase = mask_time_min_masks __UpperCamelCase = mask_feature_prob __UpperCamelCase = mask_feature_length __UpperCamelCase = mask_feature_min_masks __UpperCamelCase = median_filter_width super().__init__( pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,is_encoder_decoder=A_,decoder_start_token_id=A_,suppress_tokens=A_,begin_suppress_tokens=A_,**A_,) class __lowerCamelCase (_a ): @property def snake_case_ ( self: Optional[int] ): '''simple docstring''' __UpperCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __UpperCamelCase = {0: 'batch'} else: __UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(A_,direction='inputs' ) return common_inputs def snake_case_ ( self: Tuple,A_: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],A_: int = -1,A_: int = -1,A_: bool = False,A_: Optional["TensorType"] = None,A_: int = 2_2050,A_: float = 5.0,A_: int = 220,): '''simple docstring''' __UpperCamelCase = OrderedDict() __UpperCamelCase = OnnxConfig.generate_dummy_inputs( self,preprocessor=preprocessor.feature_extractor,batch_size=A_,framework=A_,sampling_rate=A_,time_duration=A_,frequency=A_,) __UpperCamelCase = encoder_inputs['input_features'].shape[2] __UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __UpperCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer,A_,A_,A_,A_ ) __UpperCamelCase = encoder_inputs.pop('input_features' ) __UpperCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __UpperCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def 
snake_case_ ( self: Optional[Any] ): '''simple docstring''' return 1E-3
1
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
0
from heapq import heappop, heappush import numpy as np def SCREAMING_SNAKE_CASE_ ( _snake_case :np.ndarray , _snake_case :tuple[int, int] , _snake_case :tuple[int, int] , _snake_case :bool , ) -> tuple[float | int, list[tuple[int, int]]]: _A , _A = grid.shape _A = [-1, 1, 0, 0] _A = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] _A , _A = [(0, source)], set() _A = np.full((rows, cols) , np.inf ) _A = 0 _A = np.empty((rows, cols) , dtype=_snake_case ) _A = None while queue: ((_A) , (_A)) = heappop(_snake_case ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: _A = [] while (x, y) != source: path.append((x, y) ) _A , _A = predecessors[x, y] path.append(_snake_case ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(_snake_case ) ): _A , _A = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: _A = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(_snake_case , (dist + 1, (nx, ny)) ) _A = dist + 1 _A = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
2
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = HfArgumentParser(__snake_case ) _lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] _lowerCamelCase : Dict = TensorFlowBenchmark(args=__snake_case ) try: _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: _lowerCamelCase : Union[str, Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" _lowerCamelCase : List[str] = """ """.join(str(__snake_case ).split(""" """ )[:-1] ) _lowerCamelCase : Dict = """""" _lowerCamelCase : List[Any] = eval(str(__snake_case ).split(""" """ )[-1] ) _lowerCamelCase : Tuple = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__snake_case ) if len(__snake_case ) > 0: _lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(__snake_case ) raise ValueError(__snake_case ) benchmark.run() if __name__ == "__main__": main()
88
0
'''simple docstring'''


def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the number of digits of the least repunit
    (1, 11, 111, ...) divisible by *divisor*, or 0 when no repunit is
    divisible (i.e. *divisor* shares a factor with 10).

    Restored under this name because the caller below invokes it as
    ``least_divisible_repunit``; the obfuscated original defined it as a
    shadowed duplicate of ``A_`` and referenced undefined locals.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # R(1) reduced mod divisor
    repunit_index = 1
    while repunit:
        # R(k + 1) = 10 * R(k) + 1, reduced mod divisor to keep numbers small.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def A_(A: int = 100_0000) -> int:
    """Project Euler 129: least n with gcd(n, 10) == 1 such that A(n) > *A*."""
    # A(n) <= n for every valid n, so only candidates above the limit can
    # qualify; start just below it and scan odd values upward.
    divisor = A - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= A:
        divisor += 2
    return divisor


if __name__ == "__main__":
    # Fix: originally printed ``solution()``, a name this file never defines.
    print(f"{A_() = }")
3
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class lowercase__ ( A_ ): __UpperCAmelCase = '''ibert''' def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="none" , **SCREAMING_SNAKE_CASE , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : List[Any] = position_embedding_type 
_lowerCamelCase : Any = quant_mode _lowerCamelCase : List[str] = force_dequant class lowercase__ ( A_ ): @property def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : Optional[int] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
88
0
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Tuple = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = 
_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
4
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
0
'''simple docstring'''
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fix: the package imports as "bs4", not "bsa"
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # fix: the HTTP header field is "User-Agent"; the original sent the
    # nonstandard key "UserAgent", which servers ignore.
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    # fix: the original rebound every local to "_lowercase" while later lines
    # referenced res/soup/links, so the script raised NameError at runtime.
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
5
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
0
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class UpperCamelCase_(TestCase):
    """Tests for ``FeaturesManager.determine_framework``.

    Covers the three resolution paths: an explicitly provided framework,
    a local checkpoint whose saved format decides the framework, and the
    installed environment (PyTorch preferred when both backends exist).

    Fixes vs. the previous revision: the base class was an undefined name
    (now ``TestCase``), and every method shared one mangled name, so the
    later definitions shadowed the earlier ones and unittest never ran
    the fixtures — methods now carry distinct ``setUp``/``test_*`` names.
    """

    def setUp(self):
        # Must be named ``setUp`` for unittest to invoke it before each test.
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        """Save a PyTorch checkpoint of the small test model into *model_path*."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        """Save a TensorFlow checkpoint of the small test model into *model_path*."""
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        """When the caller names a framework, it is returned verbatim."""
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        """Without an explicit framework, the saved checkpoint format decides."""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint: nothing was saved there, so the lookup
        # cannot find a model file (upstream raises FileNotFoundError here).
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        """With no framework and no local checkpoint, use what is installed."""
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
6
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
0
"""simple docstring""" import re import string import numpy as np import datasets a = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' a = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. 
Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' a = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : 
Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , reference_urls=[] , ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Optional[Any]=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: _A = np.array([re.sub(_UpperCAmelCase , '' , _UpperCAmelCase ) for x in predictions] ) _A = np.array([re.sub(_UpperCAmelCase , '' , _UpperCAmelCase ) for x in references] ) else: _A = np.asarray(_UpperCAmelCase ) _A = np.asarray(_UpperCAmelCase ) if ignore_case: _A = np.char.lower(_UpperCAmelCase ) _A = np.char.lower(_UpperCAmelCase ) if ignore_punctuation: _A = string.punctuation.maketrans('' , '' , string.punctuation ) _A = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase ) _A = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase ) if ignore_numbers: _A = string.digits.maketrans('' , '' , string.digits ) _A = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase ) _A = np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase ) _A = predictions == references return {"exact_match": np.mean(_UpperCAmelCase ) * 100}
7
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
0
"""Count inversions in a sequence by brute force and by divide-and-conquer."""


def count_inversions_bf(arr):
    """Count inversions with a nested-loop scan, O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """
    Count inversions via merge sort, O(n log n).

    :return: ``(sorted_copy, num_inversions)``
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0  # j/num_inversion were uninitialized before
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
8
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
0
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class __lowerCAmelCase(AbstractDatasetInputStream):
    """Dataset input stream backed by a user-supplied generator callable.

    Fixes vs. the previous revision: the base class was an undefined name
    (now the imported ``AbstractDatasetInputStream``), the constructor's
    parameters all shared one mangled name (a SyntaxError), the builder
    was never stored on ``self``, and the reader method is named ``read``
    as the input-stream interface expects.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes the generator into a dataset.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or map-style)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
9
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
0
# NOTE(review): this file appears machine-mangled — both functions are named
# `_snake_case` with duplicate `__snake_case` parameters (a SyntaxError), and
# most assignments bind the throwaway name `_UpperCamelCase` while later lines
# read descriptive names (`metadata`, `state_dict`, ...) that are never bound.
# Comments describe the evident intent; confirm against the upstream
# transformers MLuke conversion script before relying on them.
import argparse
import json
import os
from collections import OrderedDict

import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
    # Convert an original mLUKE checkpoint into the transformers format.
    # Parameters are presumably checkpoint_path, metadata_path,
    # entity_vocab_path, pytorch_dump_folder_path, model_size — TODO confirm.
    # Load configuration defined in the metadata file
    with open(__snake_case ) as metadata_file:
        _UpperCamelCase = json.load(__snake_case )
    _UpperCamelCase = LukeConfig(use_entity_aware_attention=__snake_case , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    _UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    _UpperCamelCase = load_original_entity_vocab(__snake_case )
    # add an entry for [MASK2]
    _UpperCamelCase = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    _UpperCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    _UpperCamelCase = AddedToken('''<ent>''' , lstrip=__snake_case , rstrip=__snake_case )
    _UpperCamelCase = AddedToken('''<ent2>''' , lstrip=__snake_case , rstrip=__snake_case )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(__snake_case )
    with open(os.path.join(__snake_case , '''tokenizer_config.json''' ) , '''r''' ) as f:
        _UpperCamelCase = json.load(__snake_case )
    _UpperCamelCase = '''MLukeTokenizer'''
    with open(os.path.join(__snake_case , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(__snake_case , __snake_case )
    with open(os.path.join(__snake_case , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(__snake_case , __snake_case )
    _UpperCamelCase = MLukeTokenizer.from_pretrained(__snake_case )
    # Initialize the embeddings of the special tokens
    _UpperCamelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    _UpperCamelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    _UpperCamelCase = state_dict['''embeddings.word_embeddings.weight''']
    _UpperCamelCase = word_emb[ent_init_index].unsqueeze(0 )
    _UpperCamelCase = word_emb[enta_init_index].unsqueeze(0 )
    _UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        _UpperCamelCase = state_dict[bias_name]
        _UpperCamelCase = decoder_bias[ent_init_index].unsqueeze(0 )
        _UpperCamelCase = decoder_bias[enta_init_index].unsqueeze(0 )
        _UpperCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            _UpperCamelCase = f"""encoder.layer.{layer_index}.attention.self."""
            _UpperCamelCase = state_dict[prefix + matrix_name]
            _UpperCamelCase = state_dict[prefix + matrix_name]
            _UpperCamelCase = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    _UpperCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
    _UpperCamelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    _UpperCamelCase = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    _UpperCamelCase = state_dict['''entity_predictions.bias''']
    _UpperCamelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    _UpperCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
    _UpperCamelCase = LukeForMaskedLM(config=__snake_case ).eval()
    # Decoder weights/bias are re-tied via tie_weights() below, hence dropped here.
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    _UpperCamelCase = OrderedDict()
    for key, value in state_dict.items():
        # Presumably prefixes non-head keys with "luke." in the upstream script — TODO confirm.
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            _UpperCamelCase = state_dict[key]
        else:
            _UpperCamelCase = state_dict[key]
    _UpperCamelCase , _UpperCamelCase = model.load_state_dict(__snake_case , strict=__snake_case )
    # Only the position-ids buffer may be unexpected; only tied decoder params may be missing.
    if set(__snake_case ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
    if set(__snake_case ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    _UpperCamelCase = MLukeTokenizer.from_pretrained(__snake_case , task='''entity_classification''' )
    _UpperCamelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    _UpperCamelCase = (0, 9)
    _UpperCamelCase = tokenizer(__snake_case , entity_spans=[span] , return_tensors='''pt''' )
    _UpperCamelCase = model(**__snake_case )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        _UpperCamelCase = torch.Size((1, 33, 768) )
        _UpperCamelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        _UpperCamelCase = torch.Size((1, 1, 768) )
        _UpperCamelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    _UpperCamelCase = MLukeTokenizer.from_pretrained(__snake_case )
    _UpperCamelCase = '''Tokyo is the capital of <mask>.'''
    _UpperCamelCase = (24, 30)
    _UpperCamelCase = tokenizer(__snake_case , entity_spans=[span] , return_tensors='''pt''' )
    _UpperCamelCase = model(**__snake_case )
    _UpperCamelCase = encoding['''input_ids'''][0].tolist()
    _UpperCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    _UpperCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__snake_case )
    _UpperCamelCase = outputs.entity_logits[0][0].argmax().item()
    _UpperCamelCase = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(__snake_case ) )
    model.save_pretrained(__snake_case )


def _snake_case ( __snake_case ):
    # Parse the original JSON-lines entity vocab into a {name-or-"lang:name" -> id} mapping.
    # NOTE(review): this second def shadows the converter above (both `_snake_case`).
    _UpperCamelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    _UpperCamelCase = [json.loads(__snake_case ) for line in open(__snake_case )]
    _UpperCamelCase = {}
    for entry in data:
        _UpperCamelCase = entry['''id''']
        for entity_name, language in entry["entities"]:
            # Special tokens are stored once, without a language prefix.
            if entity_name in SPECIAL_TOKENS:
                _UpperCamelCase = entity_id
                break
            _UpperCamelCase = f"""{language}:{entity_name}"""
            _UpperCamelCase = entity_id
    return new_mapping


if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    _lowerCAmelCase = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
10
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
0
"""Prime factorisation by trial division."""
from __future__ import annotations


def lowerCAmelCase(__A):
    """Return the prime factors of ``__A`` in non-decreasing order.

    Trial division: try divisors from 2 upward; every divisor found is
    prime because all smaller primes have already been divided out.
    (Fixes the mangled original, which left ``i``/``n``/``factors``
    unbound and appended the untouched argument instead of the divisor.)

    >>> lowerCAmelCase(12)
    [2, 2, 3]
    >>> lowerCAmelCase(1)
    []
    """
    n = __A
    divisor = 2
    factors: list[int] = []
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 1
        else:
            n //= divisor
            factors.append(divisor)
    if n > 1:
        # Whatever remains above the square root is itself prime.
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
11
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
0
"""Count attendance ("prize") strings, Project Euler 191 style.

Fixes the mangled original: both functions were named ``UpperCamelCase``
(the second shadowed the first) and the bodies referenced ``_calculate``,
``key``, ``cache`` and ``solution`` that were never bound.
"""

# Memo of (days_remaining, total_absences, consecutive_lates) -> count.
lowerCamelCase__: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count valid strings for ``days`` remaining days.

    ``absent`` is the running total of one kind of miss (at most 1
    allowed overall) and ``late`` the current consecutive run of the
    other (at most 2 in a row allowed).
    """
    # Disqualified: a third consecutive "late" or a second "absent".
    if late == 3 or absent == 2:
        return 0
    # No days left and no rule broken: exactly one valid (empty) suffix.
    if days == 0:
        return 1
    key = (days, absent, late)
    if key in lowerCamelCase__:
        return lowerCamelCase__[key]
    # 1) late today: the consecutive-late counter grows, absences unchanged
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) absent today: the absence total grows, the late run resets
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) on time: the late run resets, absences stay put
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    lowerCamelCase__[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of prize strings of length ``days`` (default 30)."""
    return _calculate(days, absent=0, late=0)


# Backwards-compatible alias for the previous (mangled) public name.
UpperCamelCase = solution


if __name__ == "__main__":
    print(solution())
12
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
0
"""Project Euler 31: count the ways to make ``x`` pence from UK coins.

Each helper counts combinations using coins up to its denomination,
largest coin first, so every combination is counted exactly once.
Fixes the mangled original, in which all eight functions shared the
name ``UpperCAmelCase__`` (each definition shadowed the previous) and
every inner call (``one_pence``, ..., ``solution``) was unbound.
"""


def one_pence() -> int:
    """Any non-negative remainder can be finished with 1p coins in one way."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most £1."""
    return 0 if x < 0 else one_pound(x - 1_00) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make ``x`` pence with coins of at most £2."""
    return 0 if x < 0 else two_pound(x - 2_00) + one_pound(x)


def solution(x: int = 2_00) -> int:
    """Number of coin combinations totalling ``x`` pence (default £2)."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
13
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
0
# Lazy-loading package init for the ONNX export utilities.
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# Submodule -> public symbols; consumed by _LazyModule so that heavy imports
# are deferred until an attribute is first accessed.  (Fixes the mangled
# original, which bound this dict to ``a__`` while reading the unbound name
# ``_import_structure`` below.)
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring the structure above.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,  # was "OnnxSeqaSeqConfigWithPast": mangled digit, inconsistent with the table above
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # NOTE(review): upstream assigns the lazy module to sys.modules[__name__];
    # here it is bound to ``a__`` as in the original — confirm intended target.
    a__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = StableDiffusionSAGPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Optional[Any]: torch.manual_seed(0) _lowerCamelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _lowerCamelCase : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) _lowerCamelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0) _lowerCamelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
_lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") _lowerCamelCase : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]: if str(SCREAMING_SNAKE_CASE).startswith("""mps"""): _lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""") _lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = """.""" _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Dict = output.images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 
0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = """.""" _lowerCamelCase : List[str] = torch.manual_seed(0) _lowerCamelCase : int = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Any = output.images _lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = """.""" _lowerCamelCase : Union[str, Any] = torch.manual_seed(0) _lowerCamelCase : Optional[int] = sag_pipe( [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images assert image.shape == (1, 512, 768, 3)
88
0
# Dummy objects that stand in for the real ONNX pipeline classes when the
# required backends are not installed.  Instantiating the class (or calling
# its `from_config` / `from_pretrained` classmethods) raises an informative
# error via `requires_backends` instead of an opaque ImportError deeper in
# the stack.
from ..utils import DummyObject, requires_backends

# Backend set guarded by every dummy class in this module.
_BACKENDS = ["torch", "transformers", "onnx"]


# NOTE(fix): the garbled source declared six byte-identical classes, each with
# `metaclass=UpperCAmelCase__` — an undefined name (the imported metaclass is
# `DummyObject`) — and stored the backend list under the mangled attribute
# `A__` instead of `_backends`, which is the attribute the DummyObject
# metaclass actually reads.  All six definitions bound the same name `A`, so
# later ones shadowed earlier ones; a single corrected definition leaves the
# module in an identical final state.
class A(metaclass=DummyObject):
    """Placeholder pipeline class requiring torch, transformers and onnx."""

    # Read by the DummyObject metaclass when building the error message.
    _backends = _BACKENDS

    def __init__(self, *args, **kwargs):
        requires_backends(self, _BACKENDS)

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, _BACKENDS)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, _BACKENDS)
15
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
0
from collections.abc import Callable def __a ( A__ : Callable[[float], float] , A__ : float , A__ : float ): SCREAMING_SNAKE_CASE = a SCREAMING_SNAKE_CASE = b if function(A__ ) == 0: # one of the a or b is a root for the function return a elif function(A__ ) == 0: return b elif ( function(A__ ) * function(A__ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("could not find root in given interval." ) else: SCREAMING_SNAKE_CASE = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(A__ ) == 0: return mid elif function(A__ ) * function(A__ ) < 0: SCREAMING_SNAKE_CASE = mid else: SCREAMING_SNAKE_CASE = mid SCREAMING_SNAKE_CASE = start + (end - start) / 2.0 return mid def __a ( A__ : float ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_0_0_0)) import doctest doctest.testmod()
16
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
0
from __future__ import annotations import os from collections.abc import Mapping UpperCAmelCase_ : List[str] = tuple[int, int] class lowerCamelCase_ : def __init__( self : Dict , __A : set[int] , __A : Mapping[EdgeT, int] ): __A : set[int] = vertices __A : dict[EdgeT, int] = { (min(__A ), max(__A )): weight for edge, weight in edges.items() } def lowerCAmelCase_ ( self : Union[str, Any] , __A : EdgeT , __A : int ): self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __A : str = weight def lowerCAmelCase_ ( self : Any ): __A : Graph = Graph({min(self.vertices )} , {} ) __A : EdgeT __A : int __A : EdgeT __A : int while len(subgraph.vertices ) < len(self.vertices ): __A : Tuple = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __A : Optional[Any] = edge __A : Tuple = weight subgraph.add_edge(__A , __A ) return subgraph def __SCREAMING_SNAKE_CASE ( a__ : str = "p107_network.txt" ) -> int: __A : str = os.path.abspath(os.path.dirname(a__ ) ) __A : str = os.path.join(a__ ,a__ ) __A : dict[EdgeT, int] = {} __A : list[str] __A : int __A : int with open(a__ ) as f: __A : Any = f.read().strip().split("""\n""" ) __A : List[str] = [line.split(""",""" ) for line in data] for edgea in range(1 ,len(a__ ) ): for edgea in range(a__ ): if adjaceny_matrix[edgea][edgea] != "-": __A : Optional[int] = int(adjaceny_matrix[edgea][edgea] ) __A : Graph = Graph(set(range(len(a__ ) ) ) ,a__ ) __A : Graph = graph.prims_algorithm() __A : int = sum(graph.edges.values() ) __A : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"""{solution() = }""")
17
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
0
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. _SCREAMING_SNAKE_CASE = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. _SCREAMING_SNAKE_CASE = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. _SCREAMING_SNAKE_CASE = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = len([g for position, g in enumerate(SCREAMING_SNAKE_CASE_ ) if g == main_target[position]] ) return (item, float(SCREAMING_SNAKE_CASE_ )) def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) _lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] _lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : list[str] ): '''simple docstring''' _lowerCAmelCase = list(SCREAMING_SNAKE_CASE_ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: _lowerCAmelCase = random.choice(SCREAMING_SNAKE_CASE_ ) return "".join(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : tuple[str, float] , SCREAMING_SNAKE_CASE_ : list[tuple[str, float]] , SCREAMING_SNAKE_CASE_ : list[str] , ): '''simple docstring''' _lowerCAmelCase = [] # Generate more children proportionally to the fitness score. 
_lowerCAmelCase = int(parent_a[1] * 100 ) + 1 _lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = population_score[random.randint(0 , SCREAMING_SNAKE_CASE_ )][0] _lowerCAmelCase , _lowerCAmelCase = crossover(parent_a[0] , SCREAMING_SNAKE_CASE_ ) # Append new string to the population list. pop.append(mutate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) pop.append(mutate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) return pop def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : list[str] , SCREAMING_SNAKE_CASE_ : bool = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: _lowerCAmelCase = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(SCREAMING_SNAKE_CASE_ ) # Verify that the target contains no genes besides the ones inside genes variable. _lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: _lowerCAmelCase = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(SCREAMING_SNAKE_CASE_ ) # Generate random starting population. _lowerCAmelCase = [] for _ in range(SCREAMING_SNAKE_CASE_ ): population.append("".join([random.choice(SCREAMING_SNAKE_CASE_ ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] ) ) # Just some logs to know what the algorithms is doing. _lowerCAmelCase , _lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(SCREAMING_SNAKE_CASE_ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. _lowerCAmelCase = [evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for item in population] # Check if there is a matching evolution. _lowerCAmelCase = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population:{total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. _lowerCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(SCREAMING_SNAKE_CASE_ ) # Normalize population score to be between 0 and 1. _lowerCAmelCase = [ (item, score / len(SCREAMING_SNAKE_CASE_ )) for item, score in population_score ] # This is selection for i in range(SCREAMING_SNAKE_CASE_ ): population.extend(select(population_score[int(SCREAMING_SNAKE_CASE_ )] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. 
If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(SCREAMING_SNAKE_CASE_ ) > N_POPULATION: break if __name__ == "__main__": _SCREAMING_SNAKE_CASE = ( "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!" ) _SCREAMING_SNAKE_CASE = list( " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm" "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
18
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
0
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def lowerCamelCase__ ( __snake_case, __snake_case, **__snake_case ) -> Tuple: """simple docstring""" _UpperCamelCase = AutoConfig.from_pretrained(__snake_case, **__snake_case ) _UpperCamelCase = AutoModelForSeqaSeqLM.from_config(__snake_case ) model.save_pretrained(__snake_case ) AutoTokenizer.from_pretrained(__snake_case ).save_pretrained(__snake_case ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
19
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase_ : def __init__( self , lowercase_ , lowercase_=2 , lowercase_=True , lowercase_=False , lowercase_=10 , lowercase_=3 , lowercase_=32 * 8 , lowercase_=32 * 8 , lowercase_=4 , lowercase_=64 , ) -> Union[str, Any]: a__ =parent a__ =batch_size a__ =is_training a__ =use_auxiliary_loss a__ =num_queries a__ =num_channels a__ =min_size a__ =max_size a__ =num_labels a__ =hidden_dim a__ =hidden_dim def __UpperCamelCase ( self) -> int: a__ =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( lowercase_) a__ =torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_) a__ =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_) > 0.5 ).float() a__ =(torch.rand((self.batch_size, self.num_labels) , device=lowercase_) > 0.5).long() a__ =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __UpperCamelCase ( self) -> List[Any]: a__ =MaskaFormerConfig( hidden_size=self.hidden_dim , ) a__ =self.num_queries a__ =self.num_labels a__ =[1, 1, 1, 1] a__ =self.num_channels a__ =64 a__ =128 a__ =self.hidden_dim a__ =self.hidden_dim a__ =self.hidden_dim return config def __UpperCamelCase ( self) -> str: 
a__ , a__ , a__ , a__ , a__ =self.prepare_config_and_inputs() a__ ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def __UpperCamelCase ( self , lowercase_ , lowercase_) -> Union[str, Any]: a__ =output.encoder_hidden_states a__ =output.pixel_decoder_hidden_states a__ =output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowercase_) , len(config.backbone_config.depths)) self.parent.assertTrue(len(lowercase_) , len(config.backbone_config.depths)) self.parent.assertTrue(len(lowercase_) , config.decoder_layers) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False) -> Any: with torch.no_grad(): a__ =MaskaFormerModel(config=lowercase_) model.to(lowercase_) model.eval() a__ =model(pixel_values=lowercase_ , pixel_mask=lowercase_) a__ =model(lowercase_ , output_hidden_states=lowercase_) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(lowercase_ , lowercase_) def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any: a__ =MaskaFormerForUniversalSegmentation(config=lowercase_) model.to(lowercase_) model.eval() def comm_check_on_output(lowercase_): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, 
self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): a__ =model(pixel_values=lowercase_ , pixel_mask=lowercase_) a__ =model(lowercase_) comm_check_on_output(lowercase_) a__ =model( pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_) comm_check_on_output(lowercase_) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ): snake_case =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case ={'feature-extraction': MaskaFormerModel} if is_torch_available() else {} snake_case =False snake_case =False snake_case =False snake_case =False def __UpperCamelCase ( self) -> Optional[int]: a__ =MaskaFormerModelTester(self) a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_) def __UpperCamelCase ( self) -> Union[str, Any]: self.config_tester.run_common_tests() def __UpperCamelCase ( self) -> List[str]: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_) def __UpperCamelCase ( self) -> List[str]: a__ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase_) @unittest.skip(reason='Mask2Former does not use inputs_embeds') def __UpperCamelCase ( self) -> str: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method') def __UpperCamelCase ( self) -> Tuple: pass @unittest.skip(reason='Mask2Former is not a generative model') def __UpperCamelCase ( self) -> List[str]: pass @unittest.skip(reason='Mask2Former does not use token 
embeddings') def __UpperCamelCase ( self) -> Tuple: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`') def __UpperCamelCase ( self) -> Union[str, Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def __UpperCamelCase ( self) -> str: pass def __UpperCamelCase ( self) -> Optional[Any]: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ =model_class(lowercase_) a__ =inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ =[*signature.parameters.keys()] a__ =['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase_) @slow def __UpperCamelCase ( self) -> Any: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: a__ =MaskaFormerModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def __UpperCamelCase ( self) -> Dict: a__ =(self.model_tester.min_size,) * 2 a__ ={ 'pixel_values': torch.randn((2, 3, *size) , device=lowercase_), 'mask_labels': torch.randn((2, 10, *size) , device=lowercase_), 'class_labels': torch.zeros(2 , 10 , device=lowercase_).long(), } a__ =self.model_tester.get_config() a__ =MaskaFormerForUniversalSegmentation(lowercase_).to(lowercase_) a__ =model(**lowercase_) self.assertTrue(outputs.loss is not None) def __UpperCamelCase ( self) -> Tuple: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_) def __UpperCamelCase ( self) -> int: a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ =model_class(lowercase_).to(lowercase_) a__ =model(**lowercase_ , output_attentions=lowercase_) self.assertTrue(outputs.attentions is not None) def __UpperCamelCase ( self) -> 
Union[str, Any]: if not self.model_tester.is_training: return a__ =self.all_model_classes[1] a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs() a__ =model_class(lowercase_) model.to(lowercase_) model.train() a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_).loss loss.backward() def __UpperCamelCase ( self) -> Union[str, Any]: a__ =self.all_model_classes[1] a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs() a__ =True a__ =True a__ =model_class(lowercase_).to(lowercase_) model.train() a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_) a__ =outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a__ =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() a__ =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a__ =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowercase_) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) _lowerCAmelCase: str = 1e-4 def _lowercase( ): a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class lowercase_ (unittest.TestCase ): @cached_property def __UpperCamelCase ( self) -> Tuple: return "facebook/mask2former-swin-small-coco-instance" @cached_property def __UpperCamelCase ( self) -> Optional[Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def __UpperCamelCase ( self) -> str: a__ =MaskaFormerModel.from_pretrained(self.model_checkpoints).to(lowercase_) a__ =self.default_image_processor a__ =prepare_img() a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_) a__ =inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(lowercase_ , (1, 3, 384, 384)) with torch.no_grad(): a__ =model(**lowercase_) a__ =torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(lowercase_) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_)) a__ =torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(lowercase_) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_)) a__ =torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(lowercase_) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_)) def __UpperCamelCase ( self) -> Any: a__ =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase_).eval() a__ =self.default_image_processor a__ =prepare_img() a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_) a__ =inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(lowercase_ , (1, 3, 384, 384)) with torch.no_grad(): a__ =model(**lowercase_) # masks_queries_logits a__ =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) a__ =[ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] a__ =torch.tensor(lowercase_).to(lowercase_) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_)) # class_queries_logits a__ =outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, 
model.config.num_queries, model.config.num_labels + 1)) a__ =torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(lowercase_) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_)) def __UpperCamelCase ( self) -> Optional[Any]: a__ =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase_).eval() a__ =self.default_image_processor a__ =image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , ) a__ =inputs['pixel_values'].to(lowercase_) a__ =[el.to(lowercase_) for el in inputs['mask_labels']] a__ =[el.to(lowercase_) for el in inputs['class_labels']] with torch.no_grad(): a__ =model(**lowercase_) self.assertTrue(outputs.loss is not None)
20
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
0
import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ): __magic_name__ : int =AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase ) __magic_name__ : List[str] =AutoModelForSeqaSeqLM.from_config(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) AutoTokenizer.from_pretrained(lowerCamelCase ).save_pretrained(lowerCamelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
21
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( __snake_case : List[str] ): """simple docstring""" for param in module.parameters(): _lowerCamelCase : Optional[Any] = False def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : Any = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : int = plt.imshow(__snake_case ) fig.axes.get_xaxis().set_visible(__snake_case ) fig.axes.get_yaxis().set_visible(__snake_case ) plt.show() def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
88
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _snake_case : Dict = logging.get_logger(__name__) _snake_case : int = { 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class A ( _a ): lowercase_ = 'dpt' def __init__( self : List[Any] , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[Any]=30_72 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : Any=1e-12 , lowerCAmelCase_ : List[Any]=3_84 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Dict=[2, 5, 8, 11] , lowerCAmelCase_ : Optional[Any]="project" , lowerCAmelCase_ : int=[4, 2, 1, 0.5] , lowerCAmelCase_ : Optional[Any]=[96, 1_92, 3_84, 7_68] , lowerCAmelCase_ : List[Any]=2_56 , lowerCAmelCase_ : Optional[int]=-1 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=0.4 , lowerCAmelCase_ : List[str]=2_55 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Tuple=[1, 10_24, 24, 24] , lowerCAmelCase_ : Optional[int]=[0, 1] , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : Dict , ) -> Union[str, Any]: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = hidden_size _a = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) _a = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } _a = BitConfig(**lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): logger.info('''Initializing the 
config with a `BiT` backbone.''' ) _a = BitConfig(**lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _a = backbone_config else: raise ValueError( F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' ) _a = backbone_featmap_shape _a = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: _a = None _a = None _a = [] _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = layer_norm_eps _a = image_size _a = patch_size _a = num_channels _a = qkv_bias _a = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) _a = readout_type _a = reassemble_factors _a = neck_hidden_sizes _a = fusion_hidden_size _a = head_in_index _a = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _a = use_auxiliary_head _a = auxiliary_loss_weight _a = semantic_loss_ignore_index _a = semantic_classifier_dropout def __lowerCAmelCase ( self : Tuple ) -> List[str]: """simple docstring""" _a = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
22
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
0
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run snake_case__ : Any = True except (ImportError, AttributeError): snake_case__ : Dict = object def _snake_case (*__lowercase , **__lowercase): pass snake_case__ : Union[str, Any] = False snake_case__ : Optional[int] = logging.get_logger("""transformers-cli/serving""") def _snake_case (__lowercase): UpperCamelCase_ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(__lowercase , args.host , args.port , args.workers) class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 A_ = 42 class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 class _a ( UpperCAmelCase__ ): """simple docstring""" @staticmethod def _UpperCAmelCase ( _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = parser.add_parser( 'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' ) serve_parser.add_argument( '--task' , type=_UpperCAmelCase , choices=get_supported_tasks() , help='The task to run the pipeline on' , ) serve_parser.add_argument('--host' , type=_UpperCAmelCase , default='localhost' , help='Interface the server will listen on.' ) serve_parser.add_argument('--port' , type=_UpperCAmelCase , default=8888 , help='Port the serving will listen to.' 
) serve_parser.add_argument('--workers' , type=_UpperCAmelCase , default=1 , help='Number of http workers' ) serve_parser.add_argument('--model' , type=_UpperCAmelCase , help='Model\'s name or path to stored model.' ) serve_parser.add_argument('--config' , type=_UpperCAmelCase , help='Model\'s config name or path to stored model.' ) serve_parser.add_argument('--tokenizer' , type=_UpperCAmelCase , help='Tokenizer name to use.' ) serve_parser.add_argument( '--device' , type=_UpperCAmelCase , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) serve_parser.set_defaults(func=_UpperCAmelCase ) def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: UpperCamelCase_ = pipeline UpperCamelCase_ = host UpperCamelCase_ = port UpperCamelCase_ = workers if not _serve_dependencies_installed: raise RuntimeError( 'Using serve command requires FastAPI and uvicorn. ' 'Please install transformers with [serving]: pip install "transformers[serving]".' 'Or install FastAPI and uvicorn separately.' 
) else: logger.info(f"""Serving model over {host}:{port}""" ) UpperCamelCase_ = FastAPI( routes=[ APIRoute( '/' , self.model_info , response_model=_UpperCAmelCase , response_class=_UpperCAmelCase , methods=['GET'] , ), APIRoute( '/tokenize' , self.tokenize , response_model=_UpperCAmelCase , response_class=_UpperCAmelCase , methods=['POST'] , ), APIRoute( '/detokenize' , self.detokenize , response_model=_UpperCAmelCase , response_class=_UpperCAmelCase , methods=['POST'] , ), APIRoute( '/forward' , self.forward , response_model=_UpperCAmelCase , response_class=_UpperCAmelCase , methods=['POST'] , ), ] , timeout=600 , ) def _UpperCAmelCase ( self ) -> Any: run(self._app , host=self.host , port=self.port , workers=self.workers ) def _UpperCAmelCase ( self ) -> Optional[int]: return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def _UpperCAmelCase ( self , _UpperCAmelCase = Body(_UpperCAmelCase , embed=_UpperCAmelCase ) , _UpperCAmelCase = Body(_UpperCAmelCase , embed=_UpperCAmelCase ) ) -> Union[str, Any]: try: UpperCamelCase_ = self._pipeline.tokenizer.tokenize(_UpperCAmelCase ) if return_ids: UpperCamelCase_ = self._pipeline.tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) return ServeTokenizeResult(tokens=_UpperCAmelCase , tokens_ids=_UpperCAmelCase ) else: return ServeTokenizeResult(tokens=_UpperCAmelCase ) except Exception as e: raise HTTPException(status_code=500 , detail={'model': '', 'error': str(_UpperCAmelCase )} ) def _UpperCAmelCase ( self , _UpperCAmelCase = Body(_UpperCAmelCase , embed=_UpperCAmelCase ) , _UpperCAmelCase = Body(_UpperCAmelCase , embed=_UpperCAmelCase ) , _UpperCAmelCase = Body(_UpperCAmelCase , embed=_UpperCAmelCase ) , ) -> int: try: UpperCamelCase_ = self._pipeline.tokenizer.decode(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return ServeDeTokenizeResult(model='' , text=_UpperCAmelCase ) except Exception as e: raise HTTPException(status_code=500 , detail={'model': '', 'error': str(_UpperCAmelCase )} ) async 
def _UpperCAmelCase ( self , _UpperCAmelCase=Body(_UpperCAmelCase , embed=_UpperCAmelCase ) ) -> Tuple: # Check we don't have empty string if len(_UpperCAmelCase ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model UpperCamelCase_ = self._pipeline(_UpperCAmelCase ) return ServeForwardResult(output=_UpperCAmelCase ) except Exception as e: raise HTTPException(500 , {'error': str(_UpperCAmelCase )} )
23
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } UpperCAmelCase_ : Optional[Any] = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : str )-> int: '''simple docstring''' for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be 
dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __snake_case = '''lm_head''' __snake_case = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case = value elif weight_type == "weight_g": __snake_case = value elif weight_type == "weight_v": __snake_case = value elif weight_type == "bias": __snake_case = value else: __snake_case = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : str )-> str: '''simple docstring''' __snake_case = [] __snake_case = fairseq_model.state_dict() __snake_case = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __snake_case = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case = True else: for key, mapped_key in MAPPING.items(): __snake_case = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case = True if "*" in mapped_key: __snake_case = name.split(_lowerCamelCase )[0].split('''.''' )[-2] __snake_case = mapped_key.replace('''*''' , _lowerCamelCase ) if "weight_g" in name: __snake_case = '''weight_g''' elif "weight_v" in name: __snake_case = '''weight_v''' elif "bias" in name: __snake_case = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case = '''weight''' else: 
__snake_case = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] )-> List[str]: '''simple docstring''' __snake_case = full_name.split('''conv_layers.''' )[-1] __snake_case = name.split('''.''' ) __snake_case = int(items[0] ) __snake_case = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=True )-> Optional[int]: '''simple docstring''' if config_path is not None: __snake_case = UniSpeechConfig.from_pretrained(_lowerCamelCase ) else: __snake_case = UniSpeechConfig() if is_finetuned: if dict_path: __snake_case = Dictionary.load_from_json(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case = target_dict.pad_index __snake_case = target_dict.bos_index __snake_case = target_dict.eos_index __snake_case = len(target_dict.symbols ) __snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' ) if not os.path.isdir(_lowerCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) __snake_case = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case = 42 __snake_case = 43 with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) __snake_case = WavaVecaPhonemeCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , 
do_lower_case=_lowerCamelCase , ) __snake_case = True if config.feat_extract_norm == '''layer''' else False __snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) __snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case = UniSpeechForCTC(_lowerCamelCase ) else: __snake_case = UniSpeechForPreTraining(_lowerCamelCase ) if is_finetuned: __snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: __snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __snake_case = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_unispeech.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
24
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = HfArgumentParser(__snake_case ) _lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] _lowerCamelCase : Dict = TensorFlowBenchmark(args=__snake_case ) try: _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: _lowerCamelCase : Union[str, Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" _lowerCamelCase : List[str] = """ """.join(str(__snake_case ).split(""" """ )[:-1] ) _lowerCamelCase : Dict = """""" _lowerCamelCase : List[Any] = eval(str(__snake_case ).split(""" """ )[-1] ) _lowerCamelCase : Tuple = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__snake_case ) if len(__snake_case ) > 0: _lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(__snake_case ) raise ValueError(__snake_case ) benchmark.run() if __name__ == "__main__": main()
88
0
"""BridgeTower model configuration classes."""

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the vision (ViT-style) encoder of a BridgeTower model."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        # Whether to stop gradients through the vision tower.
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config; unwraps a full ``bridgetower`` config if given one."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            # BUG FIX: the vision sub-config lives under "vision_config";
            # the previous code incorrectly read "text_config" here.
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the (RoBERTa-style) text encoder of a BridgeTower model."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the text sub-config; unwraps a full ``bridgetower`` config if given one."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    """Top-level BridgeTower configuration, composed of a text and a vision sub-config."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Legacy keys from older Hub checkpoints are accepted but ignored.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        """Instantiate a BridgeTowerConfig from its two sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
25
"""I-BERT model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for an I-BERT (integer-only quantized BERT) model."""

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Whether to run the model in integer-only (quantized) mode.
        self.quant_mode = quant_mode
        # Which operations (if any) to force back to floating point.
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input axes; multiple-choice tasks carry an extra 'choice' axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
88
0
"""Newton's forward-difference interpolation (interactive script)."""

from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u-1) * ... * (u-p+1), the product used by the forward formula.

    >>> ucal(1, 2)
    0
    >>> ucal(5, 3)
    60
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Read sample points interactively and interpolate at a requested value."""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # Pre-fill an n x n table with zeros; column 0 holds the sample values.
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        # BUG FIX: the mangled version called ucal/factorial with an undefined
        # name; the formula needs ucal(u, i) and factorial(i).
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
26
"""Binary tree construction plus recursive and iterative traversals."""

from __future__ import annotations

import queue


class TreeNode:
    """A binary tree node holding an int payload and optional children."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    """Interactively build a tree in level order; entering 'n' stops input."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # Unreachable: every iteration either returns or enqueues both children.
    raise RuntimeError("build_tree: queue drained without a return")


def pre_order(node: TreeNode) -> None:
    """Root -> left -> right; prints 'data,' per node."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left -> root -> right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left -> right -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one tree level per output line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for inner_node in list_:  # enqueue the next level
            q.put(inner_node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka: list[TreeNode] = []
    stackb: list[TreeNode] = []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        # BUG FIX: the reversed order must go onto the second stack; the
        # mangled version pushed back onto stacka, looping forever.
        stackb.append(n)
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return *s* centered in a banner of *width* columns padded with *char*."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
88
0
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative *decimal*, without prefix.

    >>> binary_recursive(10)
    '1010'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    # Recurse on the quotient; the remainder is the current lowest bit.
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate *number* (an optionally-signed integer string) and return it in
    Python-style binary notation, e.g. '-0b1010'.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
27
"""Tests for the TensorFlow XGLM model."""

from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    """Builds tiny XGLM configs/inputs for the common TF model tests."""

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
88
0
"""Tests for the CPM-Ant tokenizer."""

import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal vocabulary covering the special tokens plus the characters
        # used by the tests below.
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
28
"""Check whether two strings are anagrams of each other."""

from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams, ignoring case and spaces.

    >>> check_anagrams("Silent", "Listen")
    True
    >>> check_anagrams("There", "Their")
    False
    """
    # Normalise: case-insensitive, surrounding whitespace ignored.
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character, increment for the first string and decrement for the
    # second; anagrams leave every counter at exactly zero.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
88
0
"""Reverse the word order of a sentence."""


def lowercase(input_str):
    """Return *input_str* with its whitespace-separated words in reverse order.

    NOTE(review): the (mangled) function name is kept for call-site
    compatibility even though the function does not lowercase anything.

    >>> lowercase("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
"""Check whether a system of coplanar forces is in static (rotational) equilibrium."""

from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into [x, y] components.

    *angle* is interpreted in degrees unless ``radian_mode`` is True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Return True when the net moment of *forces* applied at *location* is ~0.

    Each row of *forces*/*location* is a 2-D vector; the 2-D cross product of a
    position with its force is that force's moment about the origin.
    """
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
88
0
# NOTE(review): Spark-backed `datasets` builder (an obfuscated copy of
# datasets/packaged_modules/spark/spark.py — TODO confirm against upstream).
# Identifier mangling has left this module non-runnable as-is:
#   * `__a` is reused for the module logger AND two classes;
#   * assignment targets were renamed (`UpperCAmelCase_`) while the reads
#     still use the original names (`df_with_partition_id`, `probe`, ...);
#   * `shutil.move` is called below but `shutil` is never imported;
#   * `uuid.uuida()` is presumably a mangled `uuid.uuid4()` — TODO confirm.
# Code below is byte-identical to the source; only comments were added.
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


__a = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


# BuilderConfig for Spark-backed datasets; the single field is presumably
# an optional `datasets.Features` schema — TODO confirm.
@dataclass
class __a( datasets.BuilderConfig ):
    """simple docstring"""
    lowerCAmelCase = None


# Builds a generator that yields examples partition-by-partition, in the
# given partition order, keyed "<partition_id>_<row_id>".
# NOTE(review): duplicate parameter names below are an obfuscation artifact.
def lowerCamelCase__ ( _lowercase , _lowercase , ):
    '''simple docstring'''
    import pyspark

    def generate_fn():
        # Tag each row with its Spark partition id so partitions can be
        # pulled back to the driver one at a time.
        UpperCAmelCase_ : Optional[int] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
        for partition_id in partition_order:
            UpperCAmelCase_ : Union[str, Any] = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' )
            # .collect() materialises the whole partition on the driver.
            UpperCAmelCase_ : int = partition_df.collect()
            UpperCAmelCase_ : str = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn


# Iterable over examples of a Spark DataFrame, shardable/shufflable by
# partition for datasets' streaming machinery.
class __a( _BaseExamplesIterable ):
    """simple docstring"""

    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,) -> Dict:
        UpperCAmelCase_ : str = df
        # Default order: every partition, in natural order.
        UpperCAmelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        UpperCAmelCase_ : str = _generate_iterable_examples(self.df ,self.partition_order )

    def __iter__( self ) -> Dict:
        yield from self.generate_examples_fn()

    # Shuffle by permuting the partition order (not the rows themselves).
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
        UpperCAmelCase_ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(_SCREAMING_SNAKE_CASE )
        return SparkExamplesIterable(self.df ,partition_order=_SCREAMING_SNAKE_CASE )

    # Restrict to the partitions assigned to one worker.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> "SparkExamplesIterable":
        UpperCAmelCase_ : Dict = self.split_shard_indices_by_worker(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
        return SparkExamplesIterable(self.df ,partition_order=_SCREAMING_SNAKE_CASE )

    @property
    def a__ ( self ) -> int:
        # Number of shards == number of partitions in the current order.
        return len(self.partition_order )


# DatasetBuilder that materialises a Spark DataFrame into Arrow/Parquet shards.
class __a( datasets.DatasetBuilder ):
    """simple docstring"""
    lowerCAmelCase = SparkConfig

    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
        import pyspark

        # Reuse (or create) the active session; the config name is derived
        # from the DataFrame's semantic hash so caching is plan-dependent.
        UpperCAmelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
        UpperCAmelCase_ : Union[str, Any] = df
        UpperCAmelCase_ : Optional[Any] = working_dir
        super().__init__(
            cache_dir=_SCREAMING_SNAKE_CASE ,config_name=str(self.df.semanticHash() ) ,**_SCREAMING_SNAKE_CASE ,)

    # Validate that cache_dir is visible to both driver and workers by
    # writing a probe file from a worker and checking it from the driver.
    def a__ ( self ) -> int:
        # Returns the path of the created file.
        def create_cache_and_write_probe(_SCREAMING_SNAKE_CASE ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir ,exist_ok=_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : List[str] = os.path.join(self._cache_dir ,'''fs_test''' + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(_SCREAMING_SNAKE_CASE ,'''a''' )
            return [probe_file]

        # Single-node local master: any path works, nothing to validate.
        if self._spark.conf.get('''spark.master''' ,'''''' ).startswith('''local''' ):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            UpperCAmelCase_ : Tuple = (
                self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_SCREAMING_SNAKE_CASE ).collect()
            )
            if os.path.isfile(probe[0] ):
                return

        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )

    def a__ ( self ) -> Tuple:
        return datasets.DatasetInfo(features=self.config.features )

    # Everything goes into a single TRAIN split.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    # Repartition the DataFrame so each partition is at most ~max_shard_size
    # bytes (estimated from a <=100-row Arrow sample).
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        import pyspark

        def get_arrow_batch_size(_SCREAMING_SNAKE_CASE ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )

        UpperCAmelCase_ : Union[str, Any] = self.df.count()
        UpperCAmelCase_ : Tuple = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        UpperCAmelCase_ : Tuple = (
            self.df.limit(_SCREAMING_SNAKE_CASE )
            .repartition(1 )
            .mapInArrow(_SCREAMING_SNAKE_CASE ,'''batch_bytes: long''' )
            .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
            .collect()[0]
            .sample_bytes / sample_num_rows
        )
        UpperCAmelCase_ : Dict = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            UpperCAmelCase_ : List[Any] = min(_SCREAMING_SNAKE_CASE ,int(approx_total_size / max_shard_size ) )
            UpperCAmelCase_ : List[str] = self.df.repartition(_SCREAMING_SNAKE_CASE )

    # Write one task's partitions as Arrow/Parquet shards; yields
    # (task_id, (num_examples, num_bytes, ...)) stats rows.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        UpperCAmelCase_ : Dict = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        # Write to working_dir first (if given), move to the final path later.
        UpperCAmelCase_ : Dict = os.path.join(self._working_dir ,os.path.basename(_SCREAMING_SNAKE_CASE ) ) if self._working_dir else fpath
        UpperCAmelCase_ : List[Any] = file_format == '''parquet'''
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        UpperCAmelCase_ : Any = self.config.features
        UpperCAmelCase_ : Any = self._writer_batch_size
        UpperCAmelCase_ : Optional[Any] = self._fs.storage_options

        def write_arrow(_SCREAMING_SNAKE_CASE ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            UpperCAmelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
            UpperCAmelCase_ : List[str] = next(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
            UpperCAmelCase_ : Union[str, Any] = 0
            UpperCAmelCase_ : Optional[int] = writer_class(
                features=_SCREAMING_SNAKE_CASE ,path=working_fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,writer_batch_size=_SCREAMING_SNAKE_CASE ,storage_options=_SCREAMING_SNAKE_CASE ,embed_local_files=_SCREAMING_SNAKE_CASE ,)
            UpperCAmelCase_ : str = pa.Table.from_batches([first_batch] )
            writer.write_table(_SCREAMING_SNAKE_CASE )
            for batch in it:
                # Roll over to a new shard once the size budget is reached.
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    UpperCAmelCase_, UpperCAmelCase_ : Tuple = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
                    shard_id += 1
                    UpperCAmelCase_ : Optional[int] = writer_class(
                        features=writer._features ,path=working_fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,writer_batch_size=_SCREAMING_SNAKE_CASE ,storage_options=_SCREAMING_SNAKE_CASE ,embed_local_files=_SCREAMING_SNAKE_CASE ,)
                UpperCAmelCase_ : Optional[int] = pa.Table.from_batches([batch] )
                writer.write_table(_SCREAMING_SNAKE_CASE )
            # Flush the trailing, partially filled shard (if any).
            if writer._num_bytes > 0:
                UpperCAmelCase_, UpperCAmelCase_ : Any = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] ,names=['''task_id''', '''num_examples''', '''num_bytes'''] ,)
            # Move shards from the scratch dir to the destination dir.
            # NOTE(review): `shutil` is not imported at module level.
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(_SCREAMING_SNAKE_CASE ) ):
                    UpperCAmelCase_ : Dict = os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) ,os.path.basename(_SCREAMING_SNAKE_CASE ) )
                    shutil.move(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

        # Aggregate per-task shard statistics on the cluster.
        UpperCAmelCase_ : int = (
            self.df.mapInArrow(_SCREAMING_SNAKE_CASE ,'''task_id: long, num_examples: long, num_bytes: long''' )
            .groupBy('''task_id''' )
            .agg(
                pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) ,pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) ,pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) ,pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) ,)
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    # Drive the whole split preparation: write shards, accumulate totals,
    # then rename files into the final -SSSSS-of-NNNNN naming scheme.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = "arrow" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> int:
        self._validate_cache_dir()
        UpperCAmelCase_ : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : str = not is_remote_filesystem(self._fs )
        UpperCAmelCase_ : Dict = os.path.join if is_local else posixpath.join
        # Temporary name pattern: TTTTT = task id, SSSSS = shard id.
        UpperCAmelCase_ : str = '''-TTTTT-SSSSS-of-NNNNN'''
        UpperCAmelCase_ : List[Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
        UpperCAmelCase_ : Optional[Any] = path_join(self._output_dir ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Optional[int] = 0
        UpperCAmelCase_ : str = 0
        UpperCAmelCase_ : Union[str, Any] = 0
        UpperCAmelCase_ : List[Any] = []
        UpperCAmelCase_ : Dict = []
        for task_id, content in self._prepare_split_single(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
            # NOTE(review): annotated parenthesised tuple unpack below is an
            # obfuscation artifact and is not valid Python syntax.
            (
                (
                    UpperCAmelCase_
                ),
                (
                    UpperCAmelCase_
                ),
                (
                    UpperCAmelCase_
                ),
                (
                    UpperCAmelCase_
                ),
            ) : Tuple = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Tuple = total_num_examples
        UpperCAmelCase_ : str = total_num_bytes
        # should rename everything at the end
        logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            UpperCAmelCase_ : str = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            UpperCAmelCase_ : List[Any] = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,):
                rename(
                    _SCREAMING_SNAKE_CASE ,fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,fpath.replace('''TTTTT-SSSSS''' ,f'''{global_shard_id:05d}''' ).replace('''NNNNN''' ,f'''{total_shards:05d}''' ) ,)

            # Build (task_id, shard_id, global_shard_id) triples and fan the
            # renames out across the cluster.
            UpperCAmelCase_ : str = []
            UpperCAmelCase_ : Any = 0
            for i in range(len(_SCREAMING_SNAKE_CASE ) ):
                UpperCAmelCase_, UpperCAmelCase_ : Tuple = task_id_and_num_shards[i]
                for shard_id in range(_SCREAMING_SNAKE_CASE ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(_SCREAMING_SNAKE_CASE ,len(_SCREAMING_SNAKE_CASE ) ).map(lambda _SCREAMING_SNAKE_CASE : _rename_shard(*_SCREAMING_SNAKE_CASE ) ).collect()
        else:
            # don't use any pattern
            UpperCAmelCase_ : Tuple = 0
            UpperCAmelCase_ : List[Any] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('''SSSSS''' ,f'''{shard_id:05d}''' ).replace('''TTTTT''' ,f'''{task_id:05d}''' ) ,fpath.replace(_SCREAMING_SNAKE_CASE ,'''''' ) ,)

    def a__ ( self ,_SCREAMING_SNAKE_CASE ,) -> SparkExamplesIterable:
        # Streaming entry point: iterate examples straight from the DataFrame.
        return SparkExamplesIterable(self.df )
30
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
0
# Universal gas constant R in J/(mol*K).
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed (m/s) of a gas molecule.

    Uses v_rms = sqrt(3RT / M).

    temperature: absolute temperature in kelvin (must be >= 0).
    molar_mass:  molar mass in kg/mol (must be > 0).

    Raises:
        Exception: if temperature < 0 or molar_mass <= 0.

    Bug fix: the constant was defined under a mangled name while the body
    read ``UNIVERSAL_GAS_CONSTANT``, and the ``__main__`` block called
    ``rms_speed_of_molecule`` which did not exist — both raised NameError.
    """
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
31
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
0
"""Base64 helpers: encode a str to Base64 bytes and decode them back."""
import base64


def baseaa_encode(string: str) -> bytes:
    """Encode *string* (UTF-8) to its Base64 representation.

    Bug fix: the original imported the nonexistent module ``baseaa`` and the
    ``__main__`` block called ``baseaa_encode`` while the function was
    defined under a mangled name — the script could not run at all.
    """
    return base64.b64encode(string.encode('utf-8'))


def baseaa_decode(encoded_data: bytes) -> str:
    """Decode Base64 *encoded_data* back to a UTF-8 string."""
    return base64.b64decode(encoded_data).decode('utf-8')


if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
32
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
0
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class __magic_name__(unittest.TestCase):
    """Tests for BarkProcessor: save/load round-trips, voice presets, tokenization.

    Fix over the previous revision: setUp bound plain locals and every method
    shared one obfuscated name, so the fixtures (`self.checkpoint`, ...) were
    never set and only the last method survived class creation. Attributes are
    now stored on `self` and each test has a distinct `test_*` name so unittest
    discovery works.
    """

    def setUp(self):
        # Shared fixtures read by the tests below.
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""

    def get_tokenizer(self, **kwargs):
        # Helper: tokenizer matching the processor checkpoint.
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="""(BOS)""",
            eos_token="""(EOS)""",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len)),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, """file.npz""")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["""history_prompt"""]

        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="""max_length""",
            max_length=2_56,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        # Processor output must match the raw tokenizer output key-for-key.
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
33
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
34
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
0
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class lowercase(unittest.TestCase):
    """Tests for AutoImageProcessor resolution, registration, and remote code.

    Fix over the previous revision: all methods shared one obfuscated name (so
    only the last survived class creation), and temporary-directory paths were
    passed through an undefined ``_lowercase`` placeholder instead of
    ``tmpdirname``.
    """

    def setUp(self):
        # Fail fast instead of blocking on the interactive remote-code prompt.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        image_processor = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
        self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop('''image_processor_type''')
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('''_processor_class''' not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )

            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_repo_not_found(self):
        # NOTE(review): EnvironmentError restored as the expected exception — confirm against
        # the transformers version in use.
        with self.assertRaisesRegex(
            EnvironmentError, '''clip-base is not a local folder and is not a valid model identifier'''
        ):
            _ = AutoImageProcessor.from_pretrained('''clip-base''')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''',
        ):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''')

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''')

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('''custom''', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''},
                    open(processor_tmpfile, '''w'''),
                )
                json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('''custom''', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(not hasattr(image_processor, '''is_local'''))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
35
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
0
from __future__ import annotations import collections import pprint from pathlib import Path def lowercase ( __A : str ) -> str: '''simple docstring''' return "".join(sorted(__A ) ) def lowercase ( __A : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(__A )] __lowercase : str = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') __lowercase : Any = sorted({word.strip().lower() for word in data.splitlines()}) __lowercase : List[str] = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": __lowercase : List[str] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
36
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = StableDiffusionSAGPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Optional[Any]: torch.manual_seed(0) _lowerCamelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) _lowerCamelCase : int = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , ) torch.manual_seed(0) _lowerCamelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0) _lowerCamelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
_lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""") _lowerCamelCase : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]: if str(SCREAMING_SNAKE_CASE).startswith("""mps"""): _lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""") _lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = """.""" _lowerCamelCase : int = torch.manual_seed(0) _lowerCamelCase : Tuple = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Dict = output.images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 
0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = """.""" _lowerCamelCase : List[str] = torch.manual_seed(0) _lowerCamelCase : int = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""") _lowerCamelCase : Any = output.images _lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""") _lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = """.""" _lowerCamelCase : Union[str, Any] = torch.manual_seed(0) _lowerCamelCase : Optional[int] = sag_pipe( [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) _lowerCamelCase : Union[str, Any] = output.images assert image.shape == (1, 512, 768, 3)
88
0
from math import ceil def UpperCamelCase_ ( __a = 1_001 ) -> int: a__ : Optional[Any] = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): a__ : List[str] = 2 * i + 1 a__ : Optional[int] = 2 * i a__ : Dict = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: UpperCamelCase : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number""")
37
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
0
"""Utilities to introspect a `tests/models/.../test_modeling_*.py` file.

Given such a test file, these helpers import it and extract the test classes,
the model-tester classes and the model classes they exercise, plus the
mappings between them.

NOTE(review): the obfuscated original named every function `UpperCamelCase__`
(so all cross-calls hit undefined names) and used sort keys of the form
`lambda __magic_name__: x.__name__` (NameError on `x`). Restored working
names, working sort keys and a working `to_json`.
"""
import importlib
import os
import sys

# This is required to make the module import work (when the python process is
# running from the root of the repo).
sys.path.append(".")


def get_module_path(test_file):
    """Return the dotted module path for `test_file` (a `tests/models/...` python file).

    Raises ValueError if the path is not of the expected
    `tests/models/**/test_modeling_*.py` form.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    # Keep the original (loose) suffix check: "py", not ".py".
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    return ".".join(components)


def get_test_module(test_file):
    """Import and return the module object for `test_file`."""
    test_module_path = get_module_path(test_file)
    return importlib.import_module(test_module_path)


def get_tester_classes(test_file):
    """Return all `*ModelTester` classes defined in `test_file`, sorted by name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in `test_file` that declare model classes, sorted by name."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test
        # module. Let's exclude them by checking `all_model_classes` is not
        # empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of all model classes exercised by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model-tester class used by `test_class` (or None).

    Instantiates the test class and runs `setUp` so `model_tester` is populated.
    """
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that exercise `model_class`, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes (via their test classes) for `model_class`, sorted by name."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model-tester class."""
    test_classes = get_test_classes(test_file)
    return {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes exercising it."""
    model_classes = get_model_classes(test_file)
    return {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the model-tester classes exercising it."""
    model_classes = get_model_classes(test_file)
    return {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }


def to_json(o):
    """Recursively convert classes in `o` to their names so the result is JSON-serializable.

    Strings pass through; classes become their `__name__`; lists/tuples and
    dicts are converted element-wise; anything else is returned unchanged.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
38
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
0
import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor lowerCAmelCase_ = logging.get_logger(__name__) class snake_case_ ( __A ): '''simple docstring''' def __init__( self : int , *_UpperCamelCase : str , **_UpperCamelCase : List[str] ) ->None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
39
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
0
from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def UpperCamelCase ( snake_case__ : str , snake_case__ : str , snake_case__ : Optional[str] = None ) -> str: if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release: # old versions of hfh don't url-encode the file path UpperCamelCase : Optional[int] = quote(snake_case__ ) return hfh.hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' , revision=snake_case__ )
40
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
0
"""Tests for the Kandinsky 2.2 image-to-image pipeline.

NOTE(review): this block arrived with class/attribute names collapsed to
placeholders (`lowercase_` used for BOTH classes, `SCREAMING_SNAKE_CASE` for
every attribute/property) and mangled symbols (`UNetaDConditionModel`,
`KandinskyVaaImgaImgPipeline`, `np.uinta`, `torch.floataa`), which made the
module unimportable and the second class shadow the first.  Conventional
diffusers test-suite identifiers are restored below; model configs, seeds and
numeric expectations are kept unchanged.
"""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests using tiny randomly-initialised models."""

    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
41
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
0
"""Tests for ``datasets.io.json`` (JsonDatasetReader / JsonDatasetWriter).

NOTE(review): the incoming text had every function renamed to
``_UpperCamelCase`` (so later defs shadowed earlier ones and pytest could not
collect any test) and every assignment target collapsed to ``lowerCamelCase_``,
leaving reads such as ``dataset``/``cache_dir`` unbound.  Conventional names
are restored below; the asserted behavior is unchanged.  Fixtures
(``jsonl_path``, ``jsonl_312_path``, ``dataset``, ``shared_datadir``) come
from the test suite's conftest.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    # Shared shape/schema assertions for the 4-row fixture file.
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # BUGFIX: without the parentheses the conditional expression made this
    # assert vacuously true whenever split was falsy.
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    # Shared assertions for DatasetDict results, one split at a time.
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    # BUGFIX: each JSON-lines record is one line of the buffer.
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
42
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
0
lowerCAmelCase = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' lowerCAmelCase = [{'type': 'code', 'content': INSTALL_CONTENT}] lowerCAmelCase = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
43
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( __snake_case : List[str] ): """simple docstring""" for param in module.parameters(): _lowerCamelCase : Optional[Any] = False def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : Any = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : int = plt.imshow(__snake_case ) fig.axes.get_xaxis().set_visible(__snake_case ) fig.axes.get_yaxis().set_visible(__snake_case ) plt.show() def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
88
0
"""Solve a system of n linear equations given as n lists of length n+1.

NOTE(review): the incoming text had both functions renamed to ``A_`` (the
second shadowing the first) while the bodies still called ``simplify`` and
``solve_simultaneous``, and every assignment target was collapsed to
``_lowerCamelCase``.  The original identifiers are restored; the elimination
algorithm is unchanged.
"""


def simplify(current_set: list[list]) -> list[list]:
    """One elimination pass: normalise each row by its leading coefficient,
    subtract to cancel the first column, then recurse on the sub-system.

    NOTE(review): rows are modified through a shallow copy, so the caller's
    row lists are mutated in place — presumably intentional; confirm before
    reusing inputs.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> 'unit' rows
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the cancelled first column and the pivot row
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_set[0])
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Return the solution vector (rounded to 5 decimals) for ``equations``.

    Raises IndexError for a malformed system and ValueError for non-numeric
    entries or when no equation is free of zero coefficients.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[4, 2]])
    [0.5]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    # n equations in n unknowns => each row has n+1 entries
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # The pivot row used by simplify() must contain no zeros; move one such
    # row to the front if any row contains a zero.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the last (most reduced) row upwards
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
44
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
0
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def A ( lowercase__ : str ) -> Any: # picklable for multiprocessing return x.sum() def A ( lowercase__ : Dict ) -> List[Any]: # picklable for multiprocessing return i + 1 @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : int _snake_case : str class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __a ( self :Union[str, Any] ): UpperCamelCase__ :List[str] = {} UpperCamelCase__ :Dict = [] UpperCamelCase__ :List[Any] = 1 UpperCamelCase__ :Optional[Any] = [1, 2] UpperCamelCase__ :Dict = {"""a""": 1, """b""": 2} UpperCamelCase__ :Dict = {"""a""": [1, 2], """b""": [3, 4]} UpperCamelCase__ :List[Any] = {"""a""": {"""1""": 1}, """b""": 2} UpperCamelCase__ :Optional[Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} UpperCamelCase__ :Union[str, Any] = {} UpperCamelCase__ :Tuple = [] UpperCamelCase__ :Dict = 2 UpperCamelCase__ :Optional[Any] = [2, 3] UpperCamelCase__ :Dict = {"""a""": 2, """b""": 3} UpperCamelCase__ :int = {"""a""": [2, 3], """b""": [4, 5]} UpperCamelCase__ :Optional[int] = {"""a""": {"""1""": 2}, """b""": 3} UpperCamelCase__ :Union[str, Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) 
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) UpperCamelCase__ :Dict = 2 self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ ) UpperCamelCase__ :int = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )} UpperCamelCase__ :int = {"""a""": 2, """b""": 0, """c""": 2} UpperCamelCase__ :Optional[int] = { """a""": np.eye(2 ).astype(lowerCamelCase__ ), """b""": np.zeros(3 ).astype(lowerCamelCase__ ), """c""": np.ones(2 ).astype(lowerCamelCase__ ), } self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ) , 
lowerCamelCase__ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(lowerCamelCase__ ): # can't pickle a local lambda map_nested(lambda lowerCamelCase__ : x + 1 , lowerCamelCase__ , num_proc=lowerCamelCase__ ) def __a ( self :Tuple ): UpperCamelCase__ :List[str] = {"""a""": 1, """b""": 2} UpperCamelCase__ :Tuple = {"""a""": 3, """b""": 4} UpperCamelCase__ :Optional[Any] = {"""a""": 5, """b""": 6} UpperCamelCase__ :Union[str, Any] = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ) , lowerCamelCase__ ) def __a ( self :Optional[Any] ): class lowerCAmelCase_ : """simple docstring""" _snake_case : List[str] = """bar""" UpperCamelCase__ :List[str] = Foo() self.assertEqual(foo.my_attr , """bar""" ) with temporary_assignment(lowerCamelCase__ , """my_attr""" , """BAR""" ): self.assertEqual(foo.my_attr , """BAR""" ) self.assertEqual(foo.my_attr , """bar""" ) @pytest.mark.parametrize( """iterable_length, num_proc, expected_num_proc""" , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def A ( lowercase__ : str , lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]: with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch( """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool: UpperCamelCase__ :Optional[int] = {f"""{i}""": i for i in range(lowercase__ )} UpperCamelCase__ :Any = map_nested(lambda lowercase__ : x + 10 , lowercase__ , num_proc=lowercase__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert 
mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class lowerCAmelCase_ ( lowercase ): """simple docstring""" @require_tf def __a ( self :str ): import tensorflow as tf from tensorflow.keras import layers UpperCamelCase__ :List[Any] = layers.Dense(2 ) def gen_random_output(): UpperCamelCase__ :str = tf.random.uniform((1, 3) ) return model(lowerCamelCase__ ).numpy() with temp_seed(42 , set_tensorflow=lowerCamelCase__ ): UpperCamelCase__ :List[Any] = gen_random_output() with temp_seed(42 , set_tensorflow=lowerCamelCase__ ): UpperCamelCase__ :Tuple = gen_random_output() UpperCamelCase__ :Any = gen_random_output() np.testing.assert_equal(lowerCamelCase__ , lowerCamelCase__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __a ( self :Union[str, Any] ): import torch def gen_random_output(): UpperCamelCase__ :Optional[int] = torch.nn.Linear(3 , 2 ) UpperCamelCase__ :List[Any] = torch.rand(1 , 3 ) return model(lowerCamelCase__ ).detach().numpy() with temp_seed(42 , set_pytorch=lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = gen_random_output() with temp_seed(42 , set_pytorch=lowerCamelCase__ ): UpperCamelCase__ :Union[str, Any] = gen_random_output() UpperCamelCase__ :Optional[Any] = gen_random_output() np.testing.assert_equal(lowerCamelCase__ , lowerCamelCase__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __a ( self :List[Any] ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): UpperCamelCase__ :Optional[int] = gen_random_output() with temp_seed(42 ): UpperCamelCase__ :str = gen_random_output() UpperCamelCase__ :Any = gen_random_output() np.testing.assert_equal(lowerCamelCase__ , lowerCamelCase__ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("""input_data""" , [{}] ) def A ( lowercase__ : Dict ) -> Union[str, Any]: UpperCamelCase__ :Tuple = NestedDataStructure(lowercase__ ).data assert output_data == input_data 
@pytest.mark.parametrize( """data, expected_output""" , [ ({}, []), ([], []), ("""foo""", ["""foo"""]), (["""foo""", """bar"""], ["""foo""", """bar"""]), ([["""foo""", """bar"""]], ["""foo""", """bar"""]), ([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]), ([[["""foo"""], """bar"""]], ["""foo""", """bar"""]), ({"""a""": 1, """b""": 2}, [1, 2]), ({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]), ({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]), ({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]), ({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]), ({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]), ] , ) def A ( lowercase__ : str , lowercase__ : Union[str, Any] ) -> List[Any]: UpperCamelCase__ :str = NestedDataStructure(lowercase__ ).flatten() assert output == expected_output def A ( ) -> Optional[Any]: UpperCamelCase__ :Optional[int] = A(x=1 , y="""foobar""" ) UpperCamelCase__ :Dict = {"""x""": 1, """y""": """foobar"""} assert asdict(lowercase__ ) == expected_output UpperCamelCase__ :Union[str, Any] = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]} UpperCamelCase__ :Dict = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]} assert asdict(lowercase__ ) == expected_output with pytest.raises(lowercase__ ): asdict([1, A(x=10 , y="""foo""" )] ) def A ( lowercase__ : str ) -> List[str]: return text.split() def A ( lowercase__ : Any ) -> str: yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def A ( ) -> str: with Pool(2 ) as pool: UpperCamelCase__ :List[Any] = list(iflatmap_unordered(lowercase__ , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) ) assert 
out.count("""hello""" ) == 10 assert out.count("""there""" ) == 10 assert len(lowercase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: UpperCamelCase__ :Union[str, Any] = list(iflatmap_unordered(lowercase__ , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) ) assert out.count("""hello""" ) == 10 assert out.count("""there""" ) == 10 assert len(lowercase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: UpperCamelCase__ :Tuple = [] for yield_time, content in iflatmap_unordered( lowercase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(lowercase__ ) assert out.count("""a""" ) == 2 assert out.count("""b""" ) == 2 assert len(lowercase__ ) == 4
45
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A_ ( _a , unittest.TestCase ): lowerCAmelCase__ = ShapEImgaImgPipeline lowerCAmelCase__ = ['image'] lowerCAmelCase__ = ['image'] lowerCAmelCase__ = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] lowerCAmelCase__ = False @property def _lowercase ( self: List[Any] ): '''simple docstring''' return 32 @property def _lowercase ( self: Dict ): '''simple docstring''' return 32 @property def _lowercase ( self: Any ): '''simple docstring''' return self.time_input_dim * 4 @property def _lowercase ( self: Tuple ): '''simple docstring''' return 8 @property def _lowercase ( self: Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,) _lowerCamelCase : Any = CLIPVisionModel(__lowerCAmelCase ) return model @property def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : List[Any] = CLIPImageProcessor( crop_size=224 ,do_center_crop=__lowerCAmelCase ,do_normalize=__lowerCAmelCase ,do_resize=__lowerCAmelCase ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=224 ,) return image_processor @property 
def _lowercase ( self: Tuple ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : int = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "embedding_proj_norm_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } _lowerCamelCase : str = PriorTransformer(**__lowerCAmelCase ) return model @property def _lowercase ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) _lowerCamelCase : Optional[Any] = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } _lowerCamelCase : Union[str, Any] = ShapERenderer(**__lowerCAmelCase ) return model def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Any = self.dummy_prior _lowerCamelCase : List[Any] = self.dummy_image_encoder _lowerCamelCase : Union[str, Any] = self.dummy_image_processor _lowerCamelCase : Tuple = self.dummy_renderer _lowerCamelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" ,num_train_timesteps=1_024 ,prediction_type="sample" ,use_karras_sigmas=__lowerCAmelCase ,clip_sample=__lowerCAmelCase ,clip_sample_range=1.0 ,) _lowerCamelCase : Union[str, Any] = { "prior": prior, "image_encoder": image_encoder, "image_processor": image_processor, "renderer": renderer, "scheduler": scheduler, } return components def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any]=0 ): '''simple docstring''' _lowerCamelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) if 
str(__lowerCAmelCase ).startswith("mps" ): _lowerCamelCase : Tuple = torch.manual_seed(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = { "image": input_image, "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : Dict = "cpu" _lowerCamelCase : List[str] = self.get_dummy_components() _lowerCamelCase : int = self.pipeline_class(**__lowerCAmelCase ) _lowerCamelCase : Tuple = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) _lowerCamelCase : List[Any] = output.images[0] _lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _lowerCamelCase : Union[str, Any] = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self: List[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : List[str] = torch_device == "cpu" _lowerCamelCase : List[Any] = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=__lowerCAmelCase ,relax_max_difference=__lowerCAmelCase ,) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : int = self.get_dummy_components() _lowerCamelCase : Optional[int] = self.pipeline_class(**__lowerCAmelCase ) _lowerCamelCase : List[Any] = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Dict = 1 _lowerCamelCase : Optional[int] = 2 _lowerCamelCase : str = 
self.get_dummy_inputs(__lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: _lowerCamelCase : Optional[Any] = batch_size * [inputs[key]] _lowerCamelCase : str = pipe(**__lowerCAmelCase ,num_images_per_prompt=__lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A_ ( unittest.TestCase ): def _lowercase ( self: Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" ) _lowerCamelCase : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_img2img_out.npy" ) _lowerCamelCase : Optional[Any] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" ) _lowerCamelCase : Any = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 ) _lowerCamelCase : Dict = pipe( __lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="np" ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowerCAmelCase ,__lowerCAmelCase )
46
"""CLI entry point that runs the TensorFlow benchmark suite of `transformers`."""

from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def _snake_case():
    """Parse CLI flags into ``TensorFlowBenchmarkArguments`` and run the benchmark.

    If argument parsing fails because of deprecated ``--no_xxx`` flags, re-raise
    a ``ValueError`` telling the user to write ``--no-xxx`` instead; flags that
    are genuinely unknown are still reported as errors.

    Bug fix: the previous revision passed an undefined name (``__snake_case``)
    to ``HfArgumentParser`` / ``TensorFlowBenchmark`` and the ``__main__`` guard
    called an undefined ``main()``; both are restored from the surrounding data
    flow.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() here consumes the tail of the parser's own error
        # message, not raw user input, but it is still brittle — consider
        # ast.literal_eval if this path is ever fed external text.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes the leading '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes the leading '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    _snake_case()
88
0
import torch


def UpperCAmelCase__():
    """Print how many CUDA GPUs this process can see (0 when CUDA is unavailable)."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    # Bug fix: the guard previously called an undefined `main()`, so running
    # this script always raised NameError; invoke the defined entry point.
    UpperCAmelCase__()
47
"""I-BERT model configuration (integer-quantization-aware RoBERTa variant)."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


UpperCAmelCase = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config.json files.
# NOTE: this rebinds `UpperCAmelCase` (shadowing the logger), as in the
# previous revision; the logger is not referenced in this module.
UpperCAmelCase = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for an I-BERT model.

    Bug fixes vs. the previous revision:
    * ``__init__`` declared every parameter with the same name
      (``SCREAMING_SNAKE_CASE``), which is a SyntaxError (duplicate argument);
      parameter names are restored from the attribute each value was bound to
      in the body.  Defaults are unchanged.
    * Each value was assigned to a throwaway local instead of ``self``, so the
      config stored nothing; the assignments now set instance attributes.
    * Both classes subclassed an undefined name ``A_`` and shared the name
      ``lowercase__`` (the second shadowed the first); they now subclass the
      imported ``PretrainedConfig`` / ``OnnxConfig`` under distinct names.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Quantization controls specific to I-BERT.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    """ONNX export configuration: dynamic-axis specification for I-BERT inputs."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence; every other task uses plain (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
88
0
'''simple docstring'''
# NOTE(review): identifiers in this module appear machine-mangled (classes all
# named `A`, params `__magic_name__`, locals `lowerCAmelCase__`).  Known
# breakages are flagged inline; code is kept byte-identical, only
# comments/docstrings were added.
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


# Module logger.  NOTE(review): later code calls `logger.warning`, but this
# binds the logger to `UpperCAmelCase__`, so `logger` is undefined at runtime.
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)


class A ( SCREAMING_SNAKE_CASE__ ):
    # CoNLL-style token-classification task: one "token ... label" pair per
    # line, sentences separated by blank lines or -DOCSTART- markers.
    # NOTE(review): base class `SCREAMING_SNAKE_CASE__` is never defined here
    # (presumably TokenClassificationTask from utils_ner — confirm).

    def __init__( self : Optional[Any] , __magic_name__ : Dict=-1 ):
        """simple docstring"""
        # NOTE(review): `label_idx` is not the parameter name (`__magic_name__`)
        # and the assignment binds a local, not `self.label_idx` which the
        # methods below read — both look like artifacts of the mangling.
        lowerCAmelCase__ = label_idx

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[Split, str] ):
        """simple docstring"""
        # Read `{mode}.txt` from the data dir and convert each sentence to an
        # InputExample (words + per-token labels).
        if isinstance(__magic_name__ , __magic_name__ ):
            lowerCAmelCase__ = mode.value
        lowerCAmelCase__ = os.path.join(__magic_name__ , f"""{mode}.txt""" )
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = []
        with open(__magic_name__ , encoding="utf-8" ) as f:
            lowerCAmelCase__ = []
            lowerCAmelCase__ = []
            for line in f:
                # Sentence boundary: flush the accumulated words/labels.
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__magic_name__ , labels=__magic_name__ ) )
                        guid_index += 1
                        lowerCAmelCase__ = []
                        lowerCAmelCase__ = []
                else:
                    lowerCAmelCase__ = line.split(" " )
                    words.append(splits[0] )
                    if len(__magic_name__ ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__magic_name__ , labels=__magic_name__ ) )
        return examples

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : TextIO , __magic_name__ : TextIO , __magic_name__ : List ):
        """simple docstring"""
        # Write per-token predictions next to the tokens of the test file,
        # preserving document/sentence boundary lines.
        lowerCAmelCase__ = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(__magic_name__ )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                lowerCAmelCase__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(__magic_name__ )
            else:
                # Token had no prediction (input was truncated by the model).
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )

    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str ):
        """simple docstring"""
        # Load the label list from a file, guaranteeing the "O" tag is present;
        # fall back to the standard CoNLL-2003 NER tag set.
        if path:
            with open(__magic_name__ , "r" ) as f:
                lowerCAmelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCAmelCase__ = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class A ( SCREAMING_SNAKE_CASE__ ):
    # Chunking variant: labels live in the second-to-last column.
    # NOTE(review): this redefinition shadows the previous class `A`; the
    # `super().__init__(label_idx=-2)` call suggests it was meant to extend it.

    def __init__( self : Optional[int] ):
        """simple docstring"""
        super().__init__(label_idx=-2 )

    def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
        """simple docstring"""
        # Label list for chunking; defaults to the CoNLL-2000 chunk tag set.
        if path:
            with open(__magic_name__ , "r" ) as f:
                lowerCAmelCase__ = f.read().splitlines()
            if "O" not in labels:
                lowerCAmelCase__ = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class A ( SCREAMING_SNAKE_CASE__ ):
    # POS-tagging variant backed by CoNLL-U files parsed with `conllu`.
    # NOTE(review): shadows the previous class `A` again.

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Union[Split, str] ):
        """simple docstring"""
        # Read `{mode}.txt` as CoNLL-U and emit one InputExample per sentence,
        # using the universal POS tag ("upos") as the per-token label.
        if isinstance(__magic_name__ , __magic_name__ ):
            lowerCAmelCase__ = mode.value
        lowerCAmelCase__ = os.path.join(__magic_name__ , f"""{mode}.txt""" )
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = []
        with open(__magic_name__ , encoding="utf-8" ) as f:
            for sentence in parse_incr(__magic_name__ ):
                lowerCAmelCase__ = []
                lowerCAmelCase__ = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                # Every token must carry a POS tag.
                assert len(__magic_name__ ) == len(__magic_name__ )
                if words:
                    examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__magic_name__ , labels=__magic_name__ ) )
                    guid_index += 1
        return examples

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : TextIO , __magic_name__ : TextIO , __magic_name__ : List ):
        """simple docstring"""
        # Write "token (gold_upos|predicted)" for every token of every sentence.
        lowerCAmelCase__ = 0
        for sentence in parse_incr(__magic_name__ ):
            lowerCAmelCase__ = preds_list[example_id]
            lowerCAmelCase__ = ""
            for token in sentence:
                out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(__magic_name__ )
            example_id += 1

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
        """simple docstring"""
        # Label list for POS tagging; defaults to the 17 universal POS tags.
        if path:
            with open(__magic_name__ , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
48
"""Interactive binary-tree builder plus recursive and iterative traversals.

Bug fix: the mangled original defined every function as ``_snake_case`` while
the ``__main__`` section called ``build_tree``, ``pre_order`` etc., and the
guard clauses tested ``isinstance(node, node)`` — the script could never run.
The names the script itself references are restored below.
"""
import queue


class TreeNode:
    """A binary-tree node holding an int payload and two child links."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.right: "TreeNode | None" = None
        self.left: "TreeNode | None" = None


def build_tree() -> TreeNode:
    """Interactively build a binary tree in level order from stdin.

    Entering "n" (or pressing Enter) at any prompt stops input and
    returns the tree built so far.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once input stops


def pre_order(node: TreeNode) -> None:
    """Print a root-left-right traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Print a left-root-right traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Print a left-right-root traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """Print a breadth-first traversal, comma separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing each tree level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        # Drain the current level, collecting the next one into list_.
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for n in list_:
            q.put(n)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative (stack-based) pre-order traversal."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # current node has no left child; backtrack and visit the right side
        n = stack.pop()
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative (stack-based) in-order traversal."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stacka_out = [], []
    n = node
    stacka.append(n)
    while stacka:  # reversed post order is accumulated in the second stack
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stacka_out.append(n)
    while stacka_out:  # pop up from the second stack gives post order
        print(stacka_out.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return *s* centered in a *width*-wide banner of *char* characters."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))
    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")
    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")
    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")
    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
88
0
"""simple docstring""" _lowercase : Union[str, Any] = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', } def lowercase__ ( snake_case_ :float ): assert type(snake_case_ ) in (int, float) and decimal == int(snake_case_ ) __UpperCAmelCase = int(snake_case_ ) __UpperCAmelCase = '''''' __UpperCAmelCase = False if decimal < 0: __UpperCAmelCase = True decimal *= -1 while decimal > 0: __UpperCAmelCase , __UpperCAmelCase = divmod(snake_case_ , 16 ) __UpperCAmelCase = values[remainder] + hexadecimal __UpperCAmelCase = '''0x''' + hexadecimal if negative: __UpperCAmelCase = '''-''' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
49
"""simple docstring"""
# NOTE(review): identifiers in this test module are machine-mangled and several
# references are broken as a result; the code is kept byte-identical and the
# breakages are flagged inline:
#   * the mixin bases `A_` of the second class are never defined,
#   * all three classes share the name `lowercase__` (each shadows the last),
#   * every method of a class shares one mangled name, so only the last `def`
#     survives on the class,
#   * constructor lines like `_lowerCamelCase : int = batch_size` look like
#     mangled `self.<attr> = <param>` assignments and bind only locals,
#   * many expressions reference names (`parent`, `config`, `SCREAMING_SNAKE_CASE`
#     outside a parameter list, `TFXGLMModelTester`, `tf.intaa`) that are not
#     defined in this module.
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class lowercase__ :
    # Model tester: holds the hyperparameters used to build tiny XGLM
    # configs/inputs for the common test suite.
    __UpperCAmelCase = XGLMConfig
    __UpperCAmelCase = {}
    __UpperCAmelCase = '''gelu'''

    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]:
        # NOTE(review): the right-hand names below were presumably the original
        # parameter names; as written each line raises NameError.
        _lowerCamelCase : Optional[int] = parent
        _lowerCamelCase : int = batch_size
        _lowerCamelCase : str = seq_length
        _lowerCamelCase : Any = is_training
        _lowerCamelCase : int = use_input_mask
        _lowerCamelCase : Union[str, Any] = use_labels
        _lowerCamelCase : str = vocab_size
        _lowerCamelCase : List[str] = d_model
        _lowerCamelCase : List[Any] = num_hidden_layers
        _lowerCamelCase : Dict = num_attention_heads
        _lowerCamelCase : int = ffn_dim
        _lowerCamelCase : str = activation_function
        _lowerCamelCase : Optional[int] = activation_dropout
        _lowerCamelCase : Tuple = attention_dropout
        _lowerCamelCase : Tuple = max_position_embeddings
        _lowerCamelCase : Dict = initializer_range
        _lowerCamelCase : Optional[Any] = None
        _lowerCamelCase : Union[str, Any] = 0
        _lowerCamelCase : List[Any] = 2
        _lowerCamelCase : str = 1

    def UpperCamelCase_ ( self) -> int:
        # Full pretrained config, used by "large model" tests.
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""")

    def UpperCamelCase_ ( self) -> int:
        # Build (config, input_ids, input_mask, head_mask) for the common tests.
        # Token ids are clipped to [0, 3] so generation stays deterministic.
        _lowerCamelCase : Union[str, Any] = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3)
        _lowerCamelCase : str = None
        if self.use_input_mask:
            _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
        _lowerCamelCase : Tuple = self.get_config()
        _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def UpperCamelCase_ ( self) -> Optional[int]:
        # Tiny XGLMConfig assembled from the tester's hyperparameters.
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , )

    def UpperCamelCase_ ( self) -> Optional[int]:
        # Repackage prepare_config_and_inputs output as (config, inputs_dict).
        _lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : str = config_and_inputs
        _lowerCamelCase : Optional[Any] = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict


@require_tf
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    # Common-model-test harness for TFXGLMModel / TFXGLMForCausalLM.
    # NOTE(review): bases `A_` are undefined (presumably TFModelTesterMixin and
    # PipelineTesterMixin, which are imported but unused).
    __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
    __UpperCAmelCase = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # setUp-style initializer: build the model tester and config tester.
        _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self)
        _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37)

    def UpperCamelCase_ ( self) -> Dict:
        self.config_tester.run_common_tests()

    @slow
    def UpperCamelCase_ ( self) -> List[Any]:
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE)

    @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""")
    def UpperCamelCase_ ( self) -> List[Any]:
        super().test_resize_token_embeddings()


@require_tf
class lowercase__ ( unittest.TestCase ):
    # Slow integration tests pinning generation outputs of facebook/xglm-564M.

    @slow
    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]:
        # Greedy generation from a fixed prompt must reproduce pinned token ids.
        _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE)

    @slow
    def UpperCamelCase_ ( self) -> int:
        # Seeded sampling must reproduce a pinned completion string; runs on
        # CPU so the result is device independent.
        _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        tf.random.set_seed(0)
        _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""")
        _lowerCamelCase : Any = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(""":/CPU:0"""):
            _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0])
        _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Any = (
            """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
        )
        self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)

    @slow
    def UpperCamelCase_ ( self) -> List[Any]:
        # Left-padded batched generation must match per-sentence generation.
        _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
        _lowerCamelCase : List[Any] = """left"""
        # use different length sentences to test batching
        _lowerCamelCase : List[Any] = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When""",
            """Hello, my dog is a little""",
        ]
        _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : int = inputs["""input_ids"""]
        _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12)
        _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids
        _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12)
        _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids
        _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12)
        _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Tuple = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
            """a single""",
            """Hello, my dog is a little bit of a shy one, but he is very friendly""",
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
        self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
0
"""Registry of Flax auto-model classes.

Per-task OrderedDicts map a model-type string to its Flax model class name;
`_LazyAutoMapping` turns them into lazy config->class mappings, and the
`FlaxAutoModel*` facades expose `from_pretrained`/`from_config`.

Bug fix: the mangled original bound every mapping and class to throwaway
names (`UpperCamelCase`, `UpperCamelCase__`), so the literal references the
file itself makes — `_LazyAutoMapping(CONFIG_MAPPING_NAMES,
FLAX_MODEL_MAPPING_NAMES)`, `_model_mapping = FLAX_MODEL_MAPPING`,
`auto_class_update(FlaxAutoModel)` — were all undefined.  The names those
references demand are restored below.  NOTE(review): `_model_mapping` as the
class attribute follows the `_BaseAutoModelClass` contract — confirm against
auto_factory.
"""
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)

# Lazy config-class -> model-class mappings derived from the name tables above.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    """Facade instantiating the base Flax model for any supported config."""

    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    """Facade for pre-training heads."""

    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    """Facade for causal language-modeling heads."""

    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    """Facade for masked language-modeling heads."""

    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    """Facade for encoder-decoder (seq2seq) language-modeling heads."""

    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    """Facade for sequence-classification heads."""

    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    """Facade for extractive question-answering heads."""

    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    """Facade for token-classification heads."""

    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    """Facade for multiple-choice heads."""

    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    """Facade for next-sentence-prediction heads."""

    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    """Facade for image-classification heads."""

    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    """Facade for vision-to-text models."""

    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    """Facade for speech-to-text seq2seq models."""

    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
50
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
0
"""VQ-VAE style autoencoder: Encoder -> VectorQuantizer -> Decoder.

Bug fix: in the mangled original both classes shared one name (the model
shadowed the output dataclass), `encode` returned the then-undefined
`VQEncoderOutput`, all three model methods shared one mangled name (only the
last `def` survived), and `nn.Convad` was a mangled `nn.Conv2d`.  The names
restored here are the ones the body itself references (`VQEncoderOutput`,
`quant`, `self.encode`/`self.decode`, the keyword arguments of the
Encoder/Decoder calls).  NOTE(review): the model class keeps its mangled name
`lowerCAmelCase__` because nothing in this chunk grounds the original
(presumably ``VQModel``) — rename once confirmed against callers.
"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ encoder."""

    # Encoded (pre-quantization) latents — assumed shape (batch, c, h, w);
    # TODO confirm against Encoder.
    latents: torch.FloatTensor


class lowerCAmelCase__(ModelMixin, ConfigMixin):
    """Autoencoder with a vector-quantized latent space."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        # Quantizer embedding width defaults to the latent channel count.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode `x` to (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents (unless `force_not_quantize`) and decode them."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # spatial norm conditions the decoder on the quantized latents
        dec = self.decoder(quant2, quant if self.config.norm_type == '''spatial''' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full round trip: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
51
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
0
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def __A ( a_ :Optional[Any]="ro" , a_ :List[str]="en" , a_ :str="wmt16" , a_ :str=None) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('''run pip install datasets''') __a : int = F"""{src_lang}-{tgt_lang}""" print(F"""Converting {dataset}-{pair}""") __a : Union[str, Any] = datasets.load_dataset(a_ , a_) if save_dir is None: __a : Union[str, Any] = F"""{dataset}-{pair}""" __a : List[Any] = Path(a_) save_dir.mkdir(exist_ok=a_) for split in ds.keys(): print(F"""Splitting {split} with {ds[split].num_rows} records""") # to save to val.source, val.target like summary datasets __a : Optional[int] = '''val''' if split == '''validation''' else split __a : Tuple = save_dir.joinpath(F"""{fn}.source""") __a : int = save_dir.joinpath(F"""{fn}.target""") __a : str = src_path.open('''w+''') __a : Tuple = tgt_path.open('''w+''') # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split]): __a : List[str] = x['''translation'''] src_fp.write(ex[src_lang] + '''\n''') tgt_fp.write(ex[tgt_lang] + '''\n''') print(F"""Saved {dataset} dataset to {save_dir}""") if __name__ == "__main__": fire.Fire(download_wmt_dataset)
52
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
0
import unittest from knapsack import greedy_knapsack as kp class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase ( self : Union[str, Any] ) -> List[str]: __lowerCAmelCase = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0] __lowerCAmelCase = [2, 4, 6, 8, 1_0, 1_2] __lowerCAmelCase = 1_0_0 self.assertEqual(kp.calc_profit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , 2_1_0 ) def lowercase ( self : Tuple ) -> Dict: self.assertRaisesRegex(lowerCAmelCase_ , 'max_weight must greater than zero.' ) def lowercase ( self : str ) -> Tuple: self.assertRaisesRegex(lowerCAmelCase_ , 'Weight can not be negative.' ) def lowercase ( self : int ) -> Optional[Any]: self.assertRaisesRegex(lowerCAmelCase_ , 'Profit can not be negative.' ) def lowercase ( self : int ) -> Tuple: self.assertRaisesRegex(lowerCAmelCase_ , 'max_weight must greater than zero.' ) def lowercase ( self : Optional[Any] ) -> int: self.assertRaisesRegex( lowerCAmelCase_ , 'The length of profit and weight must be same.' ) if __name__ == "__main__": unittest.main()
53
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
0
from manim import * class A ( __lowercase ): def lowerCAmelCase__ ( self: Union[str, Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ =Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase_ =Rectangle(height=0.25 , width=0.25 ) UpperCAmelCase_ =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase_ =[mem.copy() for i in range(6 )] UpperCAmelCase_ =[mem.copy() for i in range(6 )] UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =Text("CPU" , font_size=24 ) UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCAmelCase ) UpperCAmelCase_ =[mem.copy() for i in range(4 )] UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =Text("GPU" , font_size=24 ) UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_lowerCAmelCase ) UpperCAmelCase_ =[mem.copy() for i in range(6 )] UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =Text("Model" , font_size=24 ) UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_lowerCAmelCase ) UpperCAmelCase_ =[] UpperCAmelCase_ =[] UpperCAmelCase_ =[] for i, rect in enumerate(_lowerCAmelCase ): rect.set_stroke(_lowerCAmelCase ) UpperCAmelCase_ =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase ) 
cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=_lowerCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowerCAmelCase , buff=0.0 ) self.add(_lowerCAmelCase ) model_cpu_arr.append(_lowerCAmelCase ) self.add(*_lowerCAmelCase , *_lowerCAmelCase , *_lowerCAmelCase ) UpperCAmelCase_ =[mem.copy() for i in range(6 )] UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =Text("Loaded Checkpoint" , font_size=24 ) UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(_lowerCAmelCase ) UpperCAmelCase_ =[] UpperCAmelCase_ =[] for i, rect in enumerate(_lowerCAmelCase ): UpperCAmelCase_ =fill.copy().set_fill(_lowerCAmelCase , opacity=0.7 ) target.move_to(_lowerCAmelCase ) ckpt_arr.append(_lowerCAmelCase ) UpperCAmelCase_ =target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(_lowerCAmelCase ) self.add(*_lowerCAmelCase , *_lowerCAmelCase ) UpperCAmelCase_ =Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase_ =MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase_ =MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_lowerCAmelCase ) UpperCAmelCase_ =MarkupText( F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) UpperCAmelCase_ =[meta_mem.copy() for i in range(6 )] UpperCAmelCase_ =[meta_mem.copy() for i in range(6 )] UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 ) UpperCAmelCase_ =Text("Disk" , font_size=24 ) UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(_lowerCAmelCase , run_time=3 ) , Write(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) ) UpperCAmelCase_ =[] for i, rect in enumerate(_lowerCAmelCase ): UpperCAmelCase_ =rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) ) self.play(*_lowerCAmelCase ) self.play(FadeOut(_lowerCAmelCase ) ) UpperCAmelCase_ =MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCAmelCase , run_time=3 ) ) self.play( FadeOut(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , *_lowerCAmelCase ) , ) self.wait()
54
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any ,A : Dict ,A : List[Any]=7 ,A : Any=3 ,A : List[Any]=30 ,A : List[Any]=4_00 ,A : Union[str, Any]=True ,A : List[str]=None ,A : Optional[Any]=0.9 ,A : Any=None ,A : List[Any]=True ,A : List[Any]=[0.5, 0.5, 0.5] ,A : Union[str, Any]=[0.5, 0.5, 0.5] ,): __A = size if size is not None else {"shortest_edge": 30} __A = crop_size if crop_size is not None else {"height": 30, "width": 30} __A = parent __A = batch_size __A = num_channels __A = min_resolution __A = max_resolution __A = do_resize_and_center_crop __A = size __A = crop_pct __A = crop_size __A = do_normalize __A = image_mean __A = image_std def UpperCamelCase_ ( self : Optional[int] ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = PoolFormerImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self : Optional[Any] ): __A = PoolFormerImageProcessingTester(self ) @property def UpperCamelCase_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self : List[Any] ): __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"do_resize_and_center_crop" ) ) self.assertTrue(hasattr(A ,"size" ) 
) self.assertTrue(hasattr(A ,"crop_pct" ) ) self.assertTrue(hasattr(A ,"do_normalize" ) ) self.assertTrue(hasattr(A ,"image_mean" ) ) self.assertTrue(hasattr(A ,"image_std" ) ) def UpperCamelCase_ ( self : Tuple ): __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"shortest_edge": 30} ) self.assertEqual(image_processor.crop_size ,{"height": 30, "width": 30} ) __A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} ) def UpperCamelCase_ ( self : Optional[Any] ): pass def UpperCamelCase_ ( self : str ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) # Test batched __A = image_processing(A ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) def UpperCamelCase_ ( self : Union[str, Any] ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0] 
,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) # Test batched __A = image_processing(A ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) def UpperCamelCase_ ( self : Any ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) # Test batched __A = image_processing(A ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,)
55
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
0
'''Unit tests for the ByT5 (byte-level) tokenizer.

NOTE(review): this file has been mechanically obfuscated — every local is
assigned to the throwaway name ``__snake_case`` while later statements read
the original descriptive names (``tokenizer``, ``toks`` …), several method
signatures repeat the parameter name ``SCREAMING_SNAKE_CASE_`` (a
SyntaxError), and annotations such as ``Optional``/``Dict`` are used without
being imported.  The code is preserved verbatim; restore the original
identifiers before running.
'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# Pick the tensor framework used for `return_tensors` in the batch tests.
if is_torch_available():
    _a : Dict = "pt"
elif is_tf_available():
    _a : Any = "tf"
else:
    _a : Any = "jax"


class _lowercase ( __lowercase , unittest.TestCase ):
    """Test-case class for ByT5 tokenization (slow tokenizer only)."""

    _SCREAMING_SNAKE_CASE : str = ByTaTokenizer
    _SCREAMING_SNAKE_CASE : List[Any] = False

    def a ( self : int ) -> int:
        # setUp: save a fresh tokenizer into the test's temp dir.
        super().setUp()
        __snake_case = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def a ( self : Any ) -> Optional[int]:
        # Base tokenizer downloaded once per test class.
        return ByTaTokenizer.from_pretrained('google/byt5-small' )

    def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Dict=20 , SCREAMING_SNAKE_CASE_ : List[str]=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        __snake_case = []
        for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
            try:
                __snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        __snake_case = list(filter(lambda SCREAMING_SNAKE_CASE_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , SCREAMING_SNAKE_CASE_ ) )
        __snake_case = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
        if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length:
            __snake_case = toks[:max_length]
        if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0:
            while len(SCREAMING_SNAKE_CASE_ ) < min_length:
                __snake_case = toks + toks
        # toks_str = [t[1] for t in toks]
        __snake_case = [t[0] for t in toks]

        # Ensure consistency
        __snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
        if " " not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1:
            __snake_case = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
            )
        if with_prefix_space:
            __snake_case = ' ' + output_txt
        __snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        return output_txt, output_ids

    def a ( self : Optional[int] ) -> Optional[int]:
        # Appending an explicit </s> must match the automatically-added EOS.
        __snake_case = self.ta_base_tokenizer
        __snake_case = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        __snake_case = tokenizer(['hi', 'I went to the gym', ''] )
        self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )

    def a ( self : Any ) -> List[Any]:
        # Round-trip encode/decode of multibyte (non-ASCII) characters.
        __snake_case = self.ta_base_tokenizer
        __snake_case = 'Unicode €.'
        __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
        __snake_case = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE_ )

        # decoding
        __snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , 'Unicode €.</s>' )

        __snake_case = tokenizer('e è é ê ë' )
        __snake_case = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'] , SCREAMING_SNAKE_CASE_ )
        # decoding
        __snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , 'e è é ê ë</s>' )

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )

    def a ( self : int ) -> Union[str, Any]:
        # Batched call with padding and framework tensors.
        __snake_case = self.ta_base_tokenizer
        __snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        __snake_case = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
        self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

        if FRAMEWORK != "jax":
            __snake_case = list(batch.input_ids.numpy()[0] )
        else:
            __snake_case = list(batch.input_ids.tolist()[0] )

        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )

    def a ( self : Optional[Any] ) -> Union[str, Any]:
        __snake_case = self.ta_base_tokenizer
        __snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , SCREAMING_SNAKE_CASE_ )
        self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
        self.assertNotIn('decoder_input_ids' , SCREAMING_SNAKE_CASE_ )
        self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE_ )

    def a ( self : int ) -> Optional[Any]:
        # `text_target` with max_length padding yields fixed-width labels.
        __snake_case = self.ta_base_tokenizer
        __snake_case = [
            'Summary of the text.',
            'Another summary.',
        ]
        __snake_case = tokenizer(
            text_target=SCREAMING_SNAKE_CASE_ , max_length=32 , padding='max_length' , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(32 , targets['input_ids'].shape[1] )

    def a ( self : Dict ) -> Union[str, Any]:
        # Explicit trailing ' </s>' in both source and target strings.
        __snake_case = self.ta_base_tokenizer
        __snake_case = ['A long paragraph for summarization. </s>']
        __snake_case = ['Summary of the text. </s>']
        # fmt: off
        __snake_case = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        __snake_case = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , text_target=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , batch['input_ids'][0] )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , batch['labels'][0] )

    def a ( self : Tuple ) -> Optional[int]:
        # Save / reload round-trips, including added and special tokens.
        # safety check on max_len default value so we are sure the test works
        __snake_case = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )

        # Now let's start the test
        __snake_case = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case = tempfile.mkdtemp()
                __snake_case = ' He is very happy, UNwant\u00E9d,running'
                __snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
                tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )

                __snake_case = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ )
                __snake_case = after_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
                self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

                shutil.rmtree(SCREAMING_SNAKE_CASE_ )

        __snake_case = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case = tempfile.mkdtemp()

                __snake_case = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                __snake_case = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                __snake_case = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
                tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )

                __snake_case = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ )
                __snake_case = after_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
                self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )

                __snake_case = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE_ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )

                shutil.rmtree(SCREAMING_SNAKE_CASE_ )

    def a ( self : int ) -> Dict:
        # additional_special_tokens taken from the saved JSON config files.
        __snake_case = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE_ )

                with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    __snake_case = json.load(SCREAMING_SNAKE_CASE_ )

                with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    __snake_case = json.load(SCREAMING_SNAKE_CASE_ )

                __snake_case = [f'<extra_id_{i}>' for i in range(125 )]

                __snake_case = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                __snake_case = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __snake_case = tokenizer_class.from_pretrained(
                    SCREAMING_SNAKE_CASE_ , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __snake_case = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=SCREAMING_SNAKE_CASE_ )]
                __snake_case = tokenizer_class.from_pretrained(
                    SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , )

                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] ,
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )

    def a ( self : Tuple ) -> List[Any]:
        # Decoding the out-of-range byte id 255 yields the empty string.
        __snake_case = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE_ )

                __snake_case = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
                self.assertTrue(tokenizer.decode([255] ) == '' )

    # The next four overrides intentionally disable common-tokenizer tests
    # that do not apply to a byte-level, vocab-less tokenizer.
    def a ( self : Optional[int] ) -> Optional[int]:
        pass

    def a ( self : Optional[Any] ) -> Any:
        pass

    def a ( self : Any ) -> Union[str, Any]:
        pass

    def a ( self : Dict ) -> Union[str, Any]:
        pass

    def a ( self : List[str] ) -> Any:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        __snake_case = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                __snake_case = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                __snake_case = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
                self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def a ( self : List[str] ) -> Union[str, Any]:
        # Setting special-token attributes by id keeps token/id views in sync.
        __snake_case = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                __snake_case = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                __snake_case = 0
                __snake_case = tokenizer.convert_ids_to_tokens(
                    SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )

                for attr in attributes_list:
                    setattr(SCREAMING_SNAKE_CASE_ , attr + '_id' , SCREAMING_SNAKE_CASE_ )
                    self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
                    self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , attr + '_id' ) , SCREAMING_SNAKE_CASE_ )

                    setattr(SCREAMING_SNAKE_CASE_ , attr + '_id' , SCREAMING_SNAKE_CASE_ )
                    self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
                    self.assertEqual(getattr(SCREAMING_SNAKE_CASE_ , attr + '_id' ) , SCREAMING_SNAKE_CASE_ )

                setattr(SCREAMING_SNAKE_CASE_ , 'additional_special_tokens_ids' , [] )
                self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , 'additional_special_tokens' ) , [] )
                self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , 'additional_special_tokens_ids' ) , [] )

                setattr(SCREAMING_SNAKE_CASE_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(SCREAMING_SNAKE_CASE_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
56
"""Deprecated alias module for the ImageGPT feature extractor."""
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


UpperCAmelCase = logging.get_logger(__name__)


class lowercase__ ( A_ ):
    """Deprecated feature-extractor shim kept for backward compatibility.

    Emits a ``FutureWarning`` on construction and defers all behavior to the
    parent image processor.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # Fix: the two variadic parameters previously shared one name (a
        # SyntaxError), and the warning was passed the varargs tuple as its
        # category — the category must be a warning class (FutureWarning).
        warnings.warn(
            """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ImageGPTImageProcessor instead.""" ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs)
88
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : Dict = logging.get_logger(__name__)

# NOTE(review): this rebinds ``A_`` (the logger above is lost) — an artifact
# of mechanical renaming; confirm the intended distinct names.
A_ : int = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class _lowerCAmelCase( UpperCAmelCase_ ):
    """Configuration container for a ViT-MAE style model.

    Stores the encoder sizes (hidden size, layer/head counts), image and
    patch geometry, and the MAE decoder hyper-parameters assigned below.
    """

    a : Tuple ='''vit_mae'''

    # NOTE(review): every parameter below carries the same placeholder name
    # ``_lowerCamelCase`` (a SyntaxError) while the body reads descriptive
    # names (``hidden_size`` …) that are never bound — mechanical renaming
    # broke this signature; restore the original parameter names before use.
    def __init__( self , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=2_2_4 , _lowerCamelCase=1_6 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=1_6 , _lowerCamelCase=5_1_2 , _lowerCamelCase=8 , _lowerCamelCase=2_0_4_8 , _lowerCamelCase=0.7_5 , _lowerCamelCase=False , **_lowerCamelCase , ):
        super().__init__(**_lowerCamelCase )

        # NOTE(review): these were presumably ``self.<attr> = <param>``
        # assignments before obfuscation replaced the left-hand sides.
        UpperCamelCase_: Any = hidden_size
        UpperCamelCase_: Any = num_hidden_layers
        UpperCamelCase_: Tuple = num_attention_heads
        UpperCamelCase_: str = intermediate_size
        UpperCamelCase_: Dict = hidden_act
        UpperCamelCase_: Any = hidden_dropout_prob
        UpperCamelCase_: Optional[int] = attention_probs_dropout_prob
        UpperCamelCase_: Any = initializer_range
        UpperCamelCase_: List[str] = layer_norm_eps
        UpperCamelCase_: Union[str, Any] = image_size
        UpperCamelCase_: Any = patch_size
        UpperCamelCase_: Any = num_channels
        UpperCamelCase_: str = qkv_bias
        UpperCamelCase_: str = decoder_num_attention_heads
        UpperCamelCase_: Dict = decoder_hidden_size
        UpperCamelCase_: Dict = decoder_num_hidden_layers
        UpperCamelCase_: Optional[Any] = decoder_intermediate_size
        UpperCamelCase_: List[str] = mask_ratio
        UpperCamelCase_: Tuple = norm_pix_loss
57
"""Project Euler problem 800: hybrid integers.

A hybrid integer is p**q * q**p for distinct primes p < q.  Count the hybrid
integers not exceeding base**degree, comparing in log2 space so no big-integer
arithmetic is needed.  https://projecteuler.net/problem=800
"""

from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    Fix: this function was renamed to a placeholder while its call site in
    ``solution`` still used the name ``calculate_prime_numbers``.

    >>> calculate_prime_numbers(10)
    [2, 3, 5, 7]
    """
    if max_number < 2:
        # Guard: avoids isqrt(-1) / empty-sieve errors for tiny bounds.
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count pairs of distinct primes p < q with p**q * q**p <= base**degree.

    The comparison p**q * q**p <= base**degree is evaluated in log space:
    q*log2(p) + p*log2(q) <= degree*log2(base), using a two-pointer sweep over
    the sorted prime list (the left side is monotone in both p and q).

    Fixes: the two parameters shared one placeholder name (a SyntaxError),
    the obfuscated ``loga`` is math.log2, and the inner loop could decrement
    ``right`` past ``left``, adding a spurious negative contribution.

    >>> solution(16, 4)
    4
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink the window while the pair (p[left], p[right]) is too large.
        # The `left < right` guard prevents the pointers from crossing.
        while (
            left < right
            and prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        # Every q in (p[left], p[right]] forms a valid pair with p[left].
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
88
0
"""`transformers-cli train` sub-command: train a pipeline on a CSV dataset.

NOTE(review): this file has been mechanically obfuscated — the class and the
factory function have placeholder names, and several bodies read names
(``parser``, ``args``, ``TrainCommand`` …) that no longer match the renamed
parameters/definitions.  Code preserved verbatim; restore identifiers before
running.
"""
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')

# TF training parameters
# NOTE(review): the two flags below and the factory function all share the
# name ``__lowerCAmelCase`` — the function rebinding shadows both flags.
__lowerCAmelCase : Any = False
__lowerCAmelCase : Tuple = False


def __lowerCAmelCase ( __UpperCamelCase : Namespace ):
    '''Factory wired to argparse: build the train command from parsed args.'''
    return TrainCommand(__UpperCamelCase )


class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    """CLI command that fine-tunes a text-classification pipeline on CSV data."""

    @staticmethod
    def UpperCAmelCase__ ( _lowercase ) -> Any:
        '''Register the ``train`` sub-parser and all of its CLI options.'''
        # NOTE(review): ``parser`` below refers to the parameter renamed to
        # ``_lowercase``, and ``train_parser`` to the value assigned into the
        # placeholder ``snake_case_``.
        snake_case_ : str = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
        train_parser.add_argument(
            """--train_data""" , type=_lowercase , required=_lowercase , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
        train_parser.add_argument(
            """--column_label""" , type=_lowercase , default=0 , help="""Column of the dataset csv file with example labels.""" )
        train_parser.add_argument(
            """--column_text""" , type=_lowercase , default=1 , help="""Column of the dataset csv file with example texts.""" )
        train_parser.add_argument(
            """--column_id""" , type=_lowercase , default=2 , help="""Column of the dataset csv file with example ids.""" )
        train_parser.add_argument(
            """--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
        train_parser.add_argument("""--validation_data""" , type=_lowercase , default="""""" , help="""path to validation dataset.""" )
        train_parser.add_argument(
            """--validation_split""" , type=_lowercase , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
        train_parser.add_argument("""--output""" , type=_lowercase , default="""./""" , help="""path to saved the trained model.""" )
        train_parser.add_argument(
            """--task""" , type=_lowercase , default="""text_classification""" , help="""Task to train the model on.""" )
        train_parser.add_argument(
            """--model""" , type=_lowercase , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
        train_parser.add_argument("""--train_batch_size""" , type=_lowercase , default=3_2 , help="""Batch size for training.""" )
        train_parser.add_argument("""--valid_batch_size""" , type=_lowercase , default=6_4 , help="""Batch size for validation.""" )
        train_parser.add_argument("""--learning_rate""" , type=_lowercase , default=3E-5 , help="""Learning rate.""" )
        train_parser.add_argument("""--adam_epsilon""" , type=_lowercase , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
        train_parser.set_defaults(func=_lowercase )

    def __init__( self , _lowercase ) -> Dict:
        '''Resolve CLI args, load the pipeline and the train/validation CSVs.'''
        snake_case_ : Optional[int] = logging.get_logger("""transformers-cli/training""" )
        snake_case_ : Dict = """tf""" if is_tf_available() else """torch"""
        os.makedirs(args.output , exist_ok=_lowercase )
        snake_case_ : List[Any] = args.output
        snake_case_ : Optional[int] = args.column_label
        snake_case_ : List[str] = args.column_text
        snake_case_ : int = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
        if args.task == "text_classification":
            snake_case_ : Any = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}' )
        snake_case_ : Tuple = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        snake_case_ : Tuple = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}' )
            snake_case_ : List[Any] = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        snake_case_ : Dict = args.validation_split
        snake_case_ : Tuple = args.train_batch_size
        snake_case_ : List[Any] = args.valid_batch_size
        snake_case_ : List[Any] = args.learning_rate
        snake_case_ : str = args.adam_epsilon

    def UpperCAmelCase__ ( self ) -> Union[str, Any]:
        '''Dispatch to the framework-specific training entry point.'''
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def UpperCAmelCase__ ( self ) -> Any:
        '''Torch training is not implemented for this command.'''
        raise NotImplementedError

    def UpperCAmelCase__ ( self ) -> int:
        '''Fit the TF pipeline on the loaded datasets and save the result.'''
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
58
"""Tests for the Stable Diffusion SAG (Self-Attention Guidance) pipeline.

NOTE(review): mechanically obfuscated — locals are assigned to the throwaway
name ``_lowerCamelCase`` while later statements read the original names
(``unet``, ``generator``, ``sag_pipe`` …), and ``SCREAMING_SNAKE_CASE`` is
referenced without being bound.  Code preserved verbatim; restore the
original identifiers before running.
"""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    """Fast (tiny-model) tests for StableDiffusionSAGPipeline."""

    __UpperCAmelCase = StableDiffusionSAGPipeline
    __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
    __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase = False

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Build the tiny seeded components dict used by the common tests.
        torch.manual_seed(0)
        _lowerCamelCase : Dict = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        _lowerCamelCase : int = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
        torch.manual_seed(0)
        _lowerCamelCase : Tuple = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0)
        _lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        _lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")

        _lowerCamelCase : List[Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]:
        # Deterministic per-device generator plus the standard call kwargs.
        if str(SCREAMING_SNAKE_CASE).startswith("""mps"""):
            _lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE)
        else:
            _lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : List[Any] = {
            """prompt""": """.""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 1.0,
            """sag_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCamelCase_ ( self) -> Tuple:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
    """Slow GPU integration tests against full pretrained checkpoints."""

    def UpperCamelCase_ ( self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # SD 1.4 checkpoint: pin a 3x3 corner slice of the output image.
        _lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""")
        _lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE)
        sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)

        _lowerCamelCase : Optional[int] = """."""
        _lowerCamelCase : int = torch.manual_seed(0)
        _lowerCamelCase : Tuple = sag_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""")
        _lowerCamelCase : Dict = output.images
        _lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def UpperCamelCase_ ( self) -> List[str]:
        # SD 2.1-base checkpoint: same corner-slice check.
        _lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        _lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE)
        sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)

        _lowerCamelCase : Union[str, Any] = """."""
        _lowerCamelCase : List[str] = torch.manual_seed(0)
        _lowerCamelCase : int = sag_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""")
        _lowerCamelCase : Any = output.images
        _lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def UpperCamelCase_ ( self) -> List[str]:
        # Non-square (768x512) generation: only the shape is asserted.
        _lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        _lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE)
        sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)

        _lowerCamelCase : Dict = """."""
        _lowerCamelCase : Union[str, Any] = torch.manual_seed(0)
        _lowerCamelCase : Optional[int] = sag_pipe(
            [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
        _lowerCamelCase : Union[str, Any] = output.images

        assert image.shape == (1, 512, 768, 3)
88
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__A = logging.get_logger(__name__)

# NOTE(review): rebinds ``__A`` (the logger above is lost) — an artifact of
# mechanical renaming; confirm the intended distinct names.
__A = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    '''Configuration for a Decision Transformer model.

    Stores the environment dimensions (state/action), the GPT-2 style
    transformer hyper-parameters, and generation token ids assigned below.
    '''

    lowercase_ = "decision_transformer"
    lowercase_ = ["past_key_values"]
    lowercase_ = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    # NOTE(review): every parameter below carries the same placeholder name
    # ``UpperCAmelCase_`` (a SyntaxError) while the body reads descriptive
    # names (``state_dim`` …) that are never bound — mechanical renaming
    # broke this signature; restore the original parameter names before use.
    def __init__(self : str , UpperCAmelCase_ : Optional[int]=17 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : int=128 , UpperCAmelCase_ : Union[str, Any]=4_096 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : List[str]=1_024 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str="relu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=1E-5 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=50_256 , UpperCAmelCase_ : Optional[Any]=50_256 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Dict=False , **UpperCAmelCase_ : Tuple , ) ->Tuple:
        '''Store hyper-parameters and forward token ids to the parent config.'''
        # NOTE(review): these were presumably ``self.<attr> = <param>``
        # assignments before obfuscation replaced the left-hand sides.
        lowerCamelCase__: List[str] =state_dim
        lowerCamelCase__: str =act_dim
        lowerCamelCase__: Optional[Any] =hidden_size
        lowerCamelCase__: Any =max_ep_len
        lowerCamelCase__: Any =action_tanh
        lowerCamelCase__: List[str] =vocab_size
        lowerCamelCase__: Optional[int] =n_positions
        lowerCamelCase__: str =n_layer
        lowerCamelCase__: List[Any] =n_head
        lowerCamelCase__: List[Any] =n_inner
        lowerCamelCase__: str =activation_function
        lowerCamelCase__: str =resid_pdrop
        lowerCamelCase__: Any =embd_pdrop
        lowerCamelCase__: str =attn_pdrop
        lowerCamelCase__: Tuple =layer_norm_epsilon
        lowerCamelCase__: Optional[int] =initializer_range
        lowerCamelCase__: str =scale_attn_weights
        lowerCamelCase__: Optional[Any] =use_cache
        lowerCamelCase__: Any =scale_attn_by_inverse_layer_idx
        lowerCamelCase__: List[Any] =reorder_and_upcast_attn
        lowerCamelCase__: Tuple =bos_token_id
        lowerCamelCase__: List[Any] =eos_token_id

        super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
59
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
0