code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : List[str] = ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=A , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=A , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=A ) return parser.parse_args() def _SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" __snake_case : Dict = parse_args() # Import training_script as a module. __snake_case : List[str] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __snake_case : Optional[Any] = script_fpath.stem __snake_case : Optional[int] = importlib.import_module(A ) # Patch sys.argv __snake_case : Union[str, Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
61
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
61
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( A : int ) -> list[int]: """simple docstring""" __snake_case : Optional[Any] = 2 __snake_case : Union[str, Any] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(A ) if n > 1: factors.append(A ) return factors if __name__ == "__main__": import doctest doctest.testmod()
61
'''simple docstring''' import math class a_ : def __init__(self , __a=0) -> Any: # a graph with Node 0,1,...,N-1 """simple docstring""" __snake_case : List[str] = n __snake_case : Tuple = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # adjacency matrix for weight __snake_case : Union[str, Any] = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = w def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
61
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) __snake_case : int = str(bin(A ) )[2:] # remove the leading "0b" __snake_case : Optional[int] = str(bin(A ) )[2:] # remove the leading "0b" __snake_case : List[Any] = max(len(A ) , len(A ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(A ) , b_binary.zfill(A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
61
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : 
List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
1
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __snake_case : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A , id=A )
61
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
1
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class a_ : def __init__(self) -> None: """simple docstring""" __snake_case : list[Any] = [] __snake_case : int = 0 __snake_case : int = 0 def SCREAMING_SNAKE_CASE__ (self) -> bool: """simple docstring""" return self.head == self.tail def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" self.data.append(__a) __snake_case : Optional[Any] = self.tail + 1 def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Optional[int] = self.data[self.head] __snake_case : Union[str, Any] = self.head + 1 return ret def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.tail - self.head def SCREAMING_SNAKE_CASE__ (self) -> None: """simple docstring""" print(self.data) print('**************') print(self.data[self.head : self.tail]) class a_ : def __init__(self , __a) -> None: """simple docstring""" __snake_case : Tuple = data __snake_case : MyNode | None = None __snake_case : MyNode | None = None __snake_case : int = 1 def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" return self.data def SCREAMING_SNAKE_CASE__ (self) -> MyNode | None: """simple docstring""" return self.left def SCREAMING_SNAKE_CASE__ (self) -> MyNode | None: """simple docstring""" return self.right def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.height def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" __snake_case : Optional[int] = data def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" __snake_case : List[str] = node def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" __snake_case : Tuple = node def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" __snake_case : List[str] = height def _SCREAMING_SNAKE_CASE ( A : MyNode | None ) -> int: """simple docstring""" if node is None: return 0 return node.get_height() def 
_SCREAMING_SNAKE_CASE ( A : int , A : int ) -> int: """simple docstring""" if a > b: return a return b def _SCREAMING_SNAKE_CASE ( A : MyNode ) -> MyNode: """simple docstring""" print('left rotation node:' , node.get_data() ) __snake_case : List[str] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(A ) __snake_case : Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(A ) __snake_case : Any = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(A ) return ret def _SCREAMING_SNAKE_CASE ( A : MyNode ) -> MyNode: """simple docstring""" print('right rotation node:' , node.get_data() ) __snake_case : List[str] = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(A ) __snake_case : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(A ) __snake_case : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(A ) return ret def _SCREAMING_SNAKE_CASE ( A : MyNode ) -> MyNode: """simple docstring""" __snake_case : Dict = node.get_left() assert left_child is not None node.set_left(left_rotation(A ) ) return right_rotation(A ) def _SCREAMING_SNAKE_CASE ( A : MyNode ) -> MyNode: """simple docstring""" __snake_case : List[str] = node.get_right() assert right_child is not None node.set_right(right_rotation(A ) ) return left_rotation(A ) def _SCREAMING_SNAKE_CASE ( A : MyNode | None , A : Any ) -> MyNode | None: """simple docstring""" if node is None: return MyNode(A ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , A ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected __snake_case : Optional[Any] = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child __snake_case : Union[str, 
Any] = right_rotation(A ) else: __snake_case : Any = lr_rotation(A ) else: node.set_right(insert_node(node.get_right() , A ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: __snake_case : Any = node.get_right() assert right_child is not None if data < right_child.get_data(): __snake_case : List[str] = rl_rotation(A ) else: __snake_case : Dict = left_rotation(A ) __snake_case : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(A ) return node def _SCREAMING_SNAKE_CASE ( A : MyNode ) -> Any: """simple docstring""" while True: __snake_case : Optional[int] = root.get_right() if right_child is None: break __snake_case : Any = right_child return root.get_data() def _SCREAMING_SNAKE_CASE ( A : MyNode ) -> Any: """simple docstring""" while True: __snake_case : Optional[Any] = root.get_left() if left_child is None: break __snake_case : Tuple = left_child return root.get_data() def _SCREAMING_SNAKE_CASE ( A : MyNode , A : Any ) -> MyNode | None: """simple docstring""" __snake_case : Tuple = root.get_left() __snake_case : Union[str, Any] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: __snake_case : Tuple = get_left_most(A ) root.set_data(A ) root.set_right(del_node(A , A ) ) elif left_child is not None: __snake_case : List[str] = left_child elif right_child is not None: __snake_case : Dict = right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(A , A ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(A , A ) ) if get_height(A ) - get_height(A ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): __snake_case : Optional[Any] = left_rotation(A ) else: __snake_case : Optional[Any] = rl_rotation(A ) elif get_height(A ) - get_height(A ) == -2: 
assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): __snake_case : List[Any] = right_rotation(A ) else: __snake_case : Union[str, Any] = lr_rotation(A ) __snake_case : str = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(A ) return root class a_ : def __init__(self) -> None: """simple docstring""" __snake_case : MyNode | None = None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return get_height(self.root) def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" print('insert:' + str(__a)) __snake_case : Any = insert_node(self.root , __a) def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" print('delete:' + str(__a)) if self.root is None: print('Tree is empty!') return __snake_case : Union[str, Any] = del_node(self.root , __a) def __str__(self , ) -> str: # a level traversale, gives a more intuitive look on the tree """simple docstring""" __snake_case : List[str] = '' __snake_case : Optional[int] = MyQueue() q.push(self.root) __snake_case : Union[str, Any] = self.get_height() if layer == 0: return output __snake_case : Union[str, Any] = 0 while not q.is_empty(): __snake_case : Dict = q.pop() __snake_case : Tuple = ' ' * int(math.pow(2 , layer - 1)) output += space if node is None: output += "*" q.push(__a) q.push(__a) else: output += str(node.get_data()) q.push(node.get_left()) q.push(node.get_right()) output += space __snake_case : Optional[Any] = cnt + 1 for i in range(1_0_0): if cnt == math.pow(2 , __a) - 1: __snake_case : Dict = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() __A = AVLtree() __A = list(range(1_0)) random.shuffle(lst) for i in lst: 
t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
61
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() 
torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
1
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class a_ ( UpperCamelCase_ ): _snake_case = 42 _snake_case = 42 class a_ ( UpperCamelCase_ , UpperCamelCase_ ): _snake_case = 1 @register_to_config def __init__(self , __a = 2_0_0_0 , __a = 0.15 , __a = 0.01 , __a = 1_348.0 , __a = 1E-5 , __a = 1 , ) -> int: """simple docstring""" __snake_case : List[Any] = sigma_max # setable values __snake_case : Optional[int] = None self.set_sigmas(__a , __a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None) -> torch.FloatTensor: """simple docstring""" return sample def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None) -> Dict: """simple docstring""" __snake_case : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps __snake_case : Optional[int] = torch.linspace(1 , __a , __a , device=__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None) -> List[Any]: """simple docstring""" __snake_case : List[Any] = sigma_min if sigma_min is not None else self.config.sigma_min __snake_case : List[Any] = sigma_max if sigma_max is not None else self.config.sigma_max __snake_case : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(__a , __a) __snake_case : List[str] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __snake_case : str = torch.exp(torch.linspace(math.log(__a) , math.log(__a) , __a)) __snake_case : Dict = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> int: 
"""simple docstring""" return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , __a = True , ) -> Union[SdeVeOutput, Tuple]: """simple docstring""" if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') __snake_case : List[str] = timestep * torch.ones( sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0]) __snake_case : Optional[int] = (timestep * (len(self.timesteps) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __snake_case : str = timesteps.to(self.discrete_sigmas.device) __snake_case : Tuple = self.discrete_sigmas[timesteps].to(sample.device) __snake_case : List[Any] = self.get_adjacent_sigma(__a , __a).to(sample.device) __snake_case : int = torch.zeros_like(__a) __snake_case : int = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __snake_case : Optional[Any] = diffusion.flatten() while len(diffusion.shape) < len(sample.shape): __snake_case : List[Any] = diffusion.unsqueeze(-1) __snake_case : List[Any] = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __snake_case : Tuple = randn_tensor( sample.shape , layout=sample.layout , generator=__a , device=sample.device , dtype=sample.dtype) __snake_case : Any = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? 
__snake_case : Optional[int] = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=__a , prev_sample_mean=__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , __a = True , ) -> Union[SchedulerOutput, Tuple]: """simple docstring""" if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __snake_case : Optional[int] = randn_tensor(sample.shape , layout=sample.layout , generator=__a).to(sample.device) # compute step size from the model_output, the noise, and the snr __snake_case : Tuple = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean() __snake_case : Union[str, Any] = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean() __snake_case : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __snake_case : List[str] = step_size * torch.ones(sample.shape[0]).to(sample.device) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __snake_case : Dict = step_size.flatten() while len(step_size.shape) < len(sample.shape): __snake_case : Any = step_size.unsqueeze(-1) __snake_case : Optional[int] = sample + step_size * model_output __snake_case : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , ) -> torch.FloatTensor: """simple docstring""" __snake_case : Any = timesteps.to(original_samples.device) __snake_case : int = self.discrete_sigmas.to(original_samples.device)[timesteps] __snake_case : str = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(__a) * sigmas[:, None, None, 
None] ) __snake_case : Union[str, Any] = noise + original_samples return noisy_samples def __len__(self) -> List[Any]: """simple docstring""" return self.config.num_train_timesteps
61
'''simple docstring'''
# Fine-tunes a token-classification model (NER/POS/chunking) with the HF Trainer.
# NOTE(review): this file has been machine-mangled — identifiers were renamed to
# placeholders (``__A``, ``__snake_case``, ``a_``, ``_snake_case``,
# ``UpperCamelCase_``, ``A``) while many use sites kept the original names
# (``logger``, ``model_args``, ``data_args``, ``training_args``, ``parser``,
# ``trainer``, ``main`` ...), so the module will not run as-is.  Comments below
# record the apparent intent; verify against the original upstream script.
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
# NOTE(review): ``fa_score`` does not exist in seqeval — presumably ``f1_score``.
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

# NOTE(review): referenced below as ``logger``.
__A = logging.getLogger(__name__)


@dataclass
class a_ :
    # NOTE(review): originally ``ModelArguments``; every field was renamed to
    # ``_snake_case`` so each assignment overwrites the previous one, and
    # ``UpperCamelCase_`` (originally ``None``) is undefined.
    _snake_case = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    _snake_case = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    _snake_case = field(
        default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )


@dataclass
class a_ :
    # NOTE(review): originally ``DataTrainingArguments``; same field-name
    # mangling as above (and the class name collides with the one above).
    _snake_case = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."""} , )
    _snake_case = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    _snake_case = field(
        default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )


def _SCREAMING_SNAKE_CASE ( ) -> int:
    """Entry point: parse CLI/JSON arguments, load model and datasets, then
    run training, evaluation and prediction as requested."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.' )
    # Dynamically resolve the task class (e.g. NER) from the local ``tasks`` module.
    __snake_case : List[str] = import_module('tasks' )
    try:
        __snake_case : Any = getattr(A , model_args.task_type )
        __snake_case : TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
"""
            F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , A )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels )
    __snake_case : Dict[int, str] = dict(enumerate(A ) )
    __snake_case : Optional[Any] = len(A )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __snake_case : Any = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=A ,
        idalabel=A ,
        labelaid={label: i for i, label in enumerate(A )} ,
        cache_dir=model_args.cache_dir , )
    __snake_case : List[str] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        use_fast=model_args.use_fast , )
    __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,
        config=A ,
        cache_dir=model_args.cache_dir , )
    # Get datasets
    __snake_case : List[Any] = (
        TokenClassificationDataset(
            token_classification_task=A ,
            data_dir=data_args.data_dir ,
            tokenizer=A ,
            labels=A ,
            model_type=config.model_type ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.train , )
        if training_args.do_train
        else None
    )
    __snake_case : int = (
        TokenClassificationDataset(
            token_classification_task=A ,
            data_dir=data_args.data_dir ,
            tokenizer=A ,
            labels=A ,
            model_type=config.model_type ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    # NOTE(review): duplicate parameter name ``A`` below is a SyntaxError;
    # originally (predictions, label_ids).  Maps argmax predictions back to
    # label strings, skipping positions carrying the CrossEntropy ignore index.
    def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]:
        __snake_case : str = np.argmax(A , axis=2 )
        __snake_case ,__snake_case : int = preds.shape
        __snake_case : Dict = [[] for _ in range(A )]
        __snake_case : Union[str, Any] = [[] for _ in range(A )]
        for i in range(A ):
            for j in range(A ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    # seqeval entity-level metrics over the aligned label sequences.
    def compute_metrics(A : EvalPrediction ) -> Dict:
        __snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(A , A ),
            "precision": precision_score(A , A ),
            "recall": recall_score(A , A ),
            "f1": fa_score(A , A ),
        }

    # Data collator
    __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    __snake_case : Optional[Any] = Trainer(
        model=A ,
        args=A ,
        train_dataset=A ,
        eval_dataset=A ,
        compute_metrics=A ,
        data_collator=A , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    __snake_case : List[Any] = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        __snake_case : List[str] = trainer.evaluate()
        __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(A , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , A , A )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(A )
    # Predict
    if training_args.do_predict:
        __snake_case : str = TokenClassificationDataset(
            token_classification_task=A ,
            data_dir=data_args.data_dir ,
            tokenizer=A ,
            labels=A ,
            model_type=config.model_type ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.test , )
        __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A )
        __snake_case ,__snake_case : List[str] = align_predictions(A , A )
        __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(A , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' , A , A )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(A , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(A , A , A )
    return results


def _SCREAMING_SNAKE_CASE ( A : int ) -> Any:
    """Entry point used by xla_spawn (TPUs); delegates to the main routine."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
61
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : str , A : str ) -> int: """simple docstring""" if len(A ) != len(A ): raise ValueError('String lengths must match!' ) __snake_case : str = 0 for chara, chara in zip(A , A ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
61
'''simple docstring'''


def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place with odd-even (brick) sort and return it.

    Odd-even sort alternates bubble-sort passes over the pairs starting at
    even indices and the pairs starting at odd indices, repeating until a
    full double pass performs no swap.

    >>> odd_even_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> odd_even_sort([])
    []
    """
    # Bug fix: the ``__main__`` block below called ``odd_even_sort`` but the
    # function was defined under a mangled placeholder name, raising
    # NameError; the function is now defined under the name its caller uses.
    is_sorted = False
    while not is_sorted:  # keep sweeping until a pass makes no swaps
        is_sorted = True
        # pairs starting at even indices
        for i in range(0, len(input_list) - 1, 2):
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        # pairs starting at odd indices
        for i in range(1, len(input_list) - 1, 2):
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]  # elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
61
1
'''simple docstring'''


def add(first: int, second: int) -> int:
    """Return ``first + second`` computed with bitwise operations only.

    The carry (``first & second``) is shifted one bit left and re-added
    until no carry remains.  Intended for non-negative integers: because
    Python integers are unbounded, a negative ``second`` would never
    reach zero and the loop would not terminate.

    >>> add(3, 5)
    8
    >>> add(0, 7)
    7
    """
    # Bug fixes vs. the original: both parameters were named ``A``
    # (SyntaxError), the saved carry was discarded and an undefined name
    # ``c`` was shifted instead, and the ``__main__`` block called an
    # undefined ``add``.
    while second != 0:
        carry = first & second  # bits that produce a carry at this position
        first ^= second         # add without carry
        second = carry << 1     # propagate the carry one bit to the left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'{add(first, second) = }')
61
'''simple docstring'''
# Converts timm LeViT checkpoints into the HF LeViT implementation.
# NOTE(review): machine-mangled file — assignment targets were renamed to
# ``__snake_case``/``__A`` while later references keep the original names
# (``from_model``, ``our_model``, ``weights``, ``og_keys``, ``checkpoint_name``,
# ``ImageNetPreTrainedConfig``, ``names_to_hidden_sizes``, ``names_to_config``,
# ``parser``, ``args``, ``convert_weight_and_push``/``convert_weights_and_push``),
# and both functions share one duplicate-parameter signature (SyntaxError), so
# the script will not run as-is.  Comments record the apparent intent.
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
__A = logging.get_logger()


# NOTE(review): five parameters all named ``A`` (SyntaxError); return
# annotation ``Dict`` has no corresponding typing import in this file.
def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict:
    """Convert one timm LeViT checkpoint and optionally save/push the HF copy."""
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        # Pick the matching timm reference model by hidden size / name suffix.
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A )
            else:
                __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A )
        if hidden_sizes == 1_92:
            __snake_case : int = timm.create_model('levit_192' , pretrained=A )
        if hidden_sizes == 2_56:
            __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A )
        if hidden_sizes == 3_84:
            __snake_case : int = timm.create_model('levit_384' , pretrained=A )

        from_model.eval()
        __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval()
        # Copy weights positionally from the timm state dict to the HF one.
        __snake_case : int = OrderedDict()
        __snake_case : Optional[Any] = from_model.state_dict()
        __snake_case : Tuple = list(from_model.state_dict().keys() )
        __snake_case : List[str] = list(our_model.state_dict().keys() )
        print(len(A ) , len(A ) )
        for i in range(len(A ) ):
            __snake_case : Optional[int] = weights[og_keys[i]]
        our_model.load_state_dict(A )
        # Sanity check: both models must agree on a random input.
        __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) )
        __snake_case : Union[str, Any] = from_model(A )
        __snake_case : List[str] = our_model(A ).logits
        assert torch.allclose(A , A ), "The model logits don't match the original one."
        __snake_case : int = name
        print(A )

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        __snake_case : int = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F"""Pushed {checkpoint_name}""" )


def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]:
    """Drive the conversion for one named LeViT size, or for all sizes."""
    # ImageNet-1k label mapping used to build each LevitConfig.
    __snake_case : Optional[Any] = 'imagenet-1k-id2label.json'
    __snake_case : Tuple = 10_00
    __snake_case : Dict = (1, num_labels)
    __snake_case : List[str] = 'huggingface/label-files'
    __snake_case : Any = num_labels
    __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) )
    __snake_case : Any = {int(A ): v for k, v in idalabel.items()}
    __snake_case : int = idalabel
    __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
    # NOTE(review): presumably this partial was ``ImageNetPreTrainedConfig``,
    # used in the per-model config table below — TODO confirm upstream.
    __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A )
    __snake_case : Dict = {
        'levit-128S': 1_28,
        'levit-128': 1_28,
        'levit-192': 1_92,
        'levit-256': 2_56,
        'levit-384': 3_84,
    }
    __snake_case : Union[str, Any] = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A )
    return config, expected_shape


if __name__ == "__main__":
    __A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''levit-dump-folder/''',
        type=Path,
        required=False,
        help='''Path to the output PyTorch model directory.''', )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    parser.add_argument(
        '''--no-push_to_hub''',
        dest='''push_to_hub''',
        action='''store_false''',
        help='''Do not push model and image processor to the hub''', )

    __A = parser.parse_args()
    __A = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
1
"""Special-relativity Lorentz transformation of a four-vector.

Builds the boost matrix along the x-axis for a given velocity and applies it
to a (symbolic or numeric) four-vector ``(ct, x, y, z)``.

Bug fix: the original mangled all four function definitions onto a single
shared name and the constants onto ``__A``, while the call sites referenced
``beta``/``gamma``/``transformation_matrix``/``transform`` and ``c``/``ct``...
(NameError).  The names the call sites use are restored here.
"""
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299792458

# Symbolic coordinates of the four-vector
ct, x, y, z = symbols('ct x y z')


def beta(velocity: float) -> float:
    """Return v/c after validating 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!')
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost for ``velocity`` to ``event``.

    When ``event`` is None, a symbolic four-vector (ct, x, y, z) is used.
    NOTE(review): when an array is passed, ``event[0] *= c`` mutates the
    caller's array in place — kept for backward compatibility.
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print('Example of four vector: ')
    print(f'ct\' = {four_vector[0]}')
    print(f'x\' = {four_vector[1]}')
    print(f'y\' = {four_vector[2]}')
    print(f'z\' = {four_vector[3]}')

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'\n{numerical_vector}')
61
'''simple docstring'''
# Unit tests for the TF EfficientFormer models.
# NOTE(review): machine-mangled file — assignments that should target
# ``self.<attr>`` or named locals were rewritten to ``__snake_case`` (so the
# tester stores no state and tuple-unpacked names like ``config`` /
# ``inputs_dict`` are undefined), every method is named ``SCREAMING_SNAKE_CASE__``
# (later defs shadow earlier ones), the tester ``__init__`` declares 24
# parameters all named ``__a`` (SyntaxError), and the mixin bases
# ``UpperCamelCase_`` (originally the TF model-tester / pipeline-tester
# mixins) are undefined.  These tests will not run as-is.
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class a_ :
    # NOTE(review): originally the model-tester helper (builds configs and
    # dummy inputs for the model tests).
    def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str:
        """Store the test hyper-parameters on the tester instance."""
        __snake_case : Optional[Any] = parent
        __snake_case : Optional[int] = batch_size
        __snake_case : Optional[Any] = image_size
        __snake_case : Optional[int] = patch_size
        __snake_case : Optional[Any] = num_channels
        __snake_case : Optional[Any] = is_training
        __snake_case : Tuple = use_labels
        __snake_case : Optional[int] = hidden_size
        __snake_case : Any = num_hidden_layers
        __snake_case : List[str] = num_attention_heads
        __snake_case : Tuple = intermediate_size
        __snake_case : List[str] = hidden_act
        __snake_case : Dict = hidden_dropout_prob
        __snake_case : Any = attention_probs_dropout_prob
        __snake_case : Dict = type_sequence_label_size
        __snake_case : str = initializer_range
        __snake_case : int = encoder_stride
        __snake_case : List[str] = num_attention_outputs
        __snake_case : Optional[Any] = embed_dim
        __snake_case : Optional[Any] = embed_dim + 1
        __snake_case : List[str] = resolution
        __snake_case : Optional[int] = depths
        __snake_case : List[Any] = hidden_sizes
        __snake_case : List[str] = dim
        __snake_case : Union[str, Any] = mlp_expansion_ratio

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Build (config, pixel_values, labels) for a test forward pass."""
        __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __snake_case : List[str] = None
        if self.use_labels:
            __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        __snake_case : Tuple = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Return an EfficientFormerConfig built from the stored parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]:
        """Check the base model's output shape."""
        __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a)
        __snake_case : int = model(__a , training=__a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple:
        """Check the classification head's logits shape (RGB and greyscale)."""
        __snake_case : Dict = self.type_sequence_label_size
        __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
        __snake_case : Optional[int] = model(__a , labels=__a , training=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        __snake_case : List[Any] = 1
        __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
        __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __snake_case : str = model(__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Return (config, inputs_dict) in the shape the common tests expect."""
        __snake_case : Union[str, Any] = self.prepare_config_and_inputs()
        __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs
        __snake_case : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # NOTE(review): class attributes below were all renamed to ``_snake_case``
    # (each overwrites the previous); originally all_model_classes,
    # pipeline_model_mapping and the common-test feature flags.
    _snake_case = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    _snake_case = (
        {
            """feature-extraction""": TFEfficientFormerModel,
            """image-classification""": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def SCREAMING_SNAKE_CASE__ (self) -> Dict:
        """setUp: create the model tester and the config tester."""
        __snake_case : Dict = TFEfficientFormerModelTester(self)
        __snake_case : List[Any] = ConfigTester(
            self , config_class=__a , has_text_modality=__a , hidden_size=3_7)

    def SCREAMING_SNAKE_CASE__ (self) -> str:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Check that every model's call signature starts with pixel_values."""
        __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Optional[int] = model_class(__a)
            __snake_case : Union[str, Any] = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : Optional[int] = [*signature.parameters.keys()]
            __snake_case : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __a)

    def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
        """Check hidden-state outputs (count and last layer's shape)."""

        def check_hidden_states_output(__a , __a , __a):
            __snake_case : str = model_class(__a)
            __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a)
            __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __snake_case : Optional[Any] = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(__a) , __a)
            if hasattr(self.model_tester , 'encoder_seq_length'):
                __snake_case : List[Any] = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
                    __snake_case : str = seq_length * self.model_tester.chunk_length
            else:
                __snake_case : Optional[int] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __snake_case : List[Any] = outputs.decoder_hidden_states
                # NOTE(review): pre-existing typo — should be ``assertIsInstance``.
                self.asseretIsInstance(__a , (list, tuple))
                self.assertEqual(len(__a) , __a)
                __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a)
                __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )

        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : List[str] = True
            check_hidden_states_output(__a , __a , __a)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : str = True
            check_hidden_states_output(__a , __a , __a)

    def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int:
        """Drop labels for the teacher model, which does not accept them."""
        __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """simple docstring"""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """simple docstring"""
        __snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a)

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """simple docstring"""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a)

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
            self.assertIsNotNone(__a)

    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Check attention outputs (count and per-head shapes)."""
        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Tuple = True
        __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a)
        __snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a)
        __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a)
        __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a)
        if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
            __snake_case : str = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            __snake_case : Optional[Any] = True
            __snake_case : Dict = False
            __snake_case : Optional[int] = True
            __snake_case : Dict = model_class(__a)
            __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a)
            __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __snake_case : Dict = True
            __snake_case : str = model_class(__a)
            __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a)
            __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) ,
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) ,
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Check the model builds functionally from fully-flexible Keras inputs."""
        __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            __snake_case : Tuple = model_class(__a)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __snake_case : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __snake_case : Tuple = model(__a)
            self.assertTrue(outputs_dict is not None)


def _SCREAMING_SNAKE_CASE ( ) -> int:
    """Load the COCO test fixture image used by the integration tests."""
    __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
@require_vision
class a_ ( unittest.TestCase ):
    @cached_property
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Default image processor for the pretrained checkpoint, if vision is available."""
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Integration test: classification head logits on the fixture image."""
        __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        __snake_case : Optional[int] = self.default_image_processor
        __snake_case : List[Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
        # forward pass
        __snake_case : List[str] = model(**__a , training=__a)
        # verify the logits
        __snake_case : str = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , __a)
        __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))

    @slow
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Integration test: teacher-head logits on the fixture image."""
        __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        __snake_case : List[Any] = self.default_image_processor
        __snake_case : Union[str, Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
        # forward pass
        __snake_case : Optional[int] = model(**__a , training=__a)
        # verify the logits
        __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , __a)
        __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
1
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar('''KEY''') __A = TypeVar('''VAL''') @dataclass(frozen=UpperCamelCase_ , slots=UpperCamelCase_ ) class a_ ( Generic[KEY, VAL] ): _snake_case = 42 _snake_case = 42 class a_ ( _Item ): def __init__(self) -> None: """simple docstring""" super().__init__(__a , __a) def __bool__(self) -> bool: """simple docstring""" return False __A = _DeletedItem() class a_ ( MutableMapping[KEY, VAL] ): def __init__(self , __a = 8 , __a = 0.75) -> None: """simple docstring""" __snake_case : Any = initial_block_size __snake_case : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __snake_case : List[str] = capacity_factor __snake_case : Dict = 0 def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" return hash(__a) % len(self._buckets) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" return (ind + 1) % len(self._buckets) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> bool: """simple docstring""" __snake_case : List[str] = self._buckets[ind] if not stored: __snake_case : Optional[Any] = _Item(__a , __a) self._len += 1 return True elif stored.key == key: __snake_case : Dict = _Item(__a , __a) return True else: return False def SCREAMING_SNAKE_CASE__ (self) -> bool: """simple docstring""" __snake_case : str = len(self._buckets) * self._capacity_factor return len(self) >= int(__a) def SCREAMING_SNAKE_CASE__ (self) -> bool: """simple docstring""" if len(self._buckets) <= self._initial_block_size: return False __snake_case : str = len(self._buckets) * self._capacity_factor / 2 return len(self) < limit def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" __snake_case : Optional[int] = self._buckets __snake_case : Dict = [None] * new_size __snake_case : Optional[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val) 
def SCREAMING_SNAKE_CASE__ (self) -> None: """simple docstring""" self._resize(len(self._buckets) * 2) def SCREAMING_SNAKE_CASE__ (self) -> None: """simple docstring""" self._resize(len(self._buckets) // 2) def SCREAMING_SNAKE_CASE__ (self , __a) -> Iterator[int]: """simple docstring""" __snake_case : List[Any] = self._get_bucket_index(__a) for _ in range(len(self._buckets)): yield ind __snake_case : Union[str, Any] = self._get_next_ind(__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" for ind in self._iterate_buckets(__a): if self._try_set(__a , __a , __a): break def __setitem__(self , __a , __a) -> None: """simple docstring""" if self._is_full(): self._size_up() self._add_item(__a , __a) def __delitem__(self , __a) -> None: """simple docstring""" for ind in self._iterate_buckets(__a): __snake_case : Optional[int] = self._buckets[ind] if item is None: raise KeyError(__a) if item is _deleted: continue if item.key == key: __snake_case : int = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__(self , __a) -> VAL: """simple docstring""" for ind in self._iterate_buckets(__a): __snake_case : Union[str, Any] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__a) def __len__(self) -> int: """simple docstring""" return self._len def __iter__(self) -> Iterator[KEY]: """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__(self) -> str: """simple docstring""" __snake_case : str = ' ,'.join( F"""{item.key}: {item.val}""" for item in self._buckets if item) return F"""HashMap({val_string})"""
61
"""Project Euler problem 30: digit fifth powers.

Find the sum of all numbers that can be written as the sum of the fifth
powers of their digits.
"""

# Fifth power of each decimal digit, keyed by its character, so that
# digits_fifth_powers_sum can do a dict lookup per character of str(number).
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers
    of their digits.

    Single-digit numbers are excluded (the search starts at 1000); the upper
    bound of 1,000,000 suffices because 7 * 9**5 < 9,999,999.
    """
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
61
1
"""Lazy import structure for the Pix2Struct model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# Vision-only exports are registered only when the backend is installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# Torch-only exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; module/class names must
    # match the strings declared in _import_structure above.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends
    # are imported only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
61
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : _snake_case = 42 _snake_case = None _snake_case = None def _SCREAMING_SNAKE_CASE ( ) -> Node | None: """simple docstring""" __snake_case : str = Node(1 ) __snake_case : Tuple = Node(2 ) __snake_case : Optional[int] = Node(3 ) __snake_case : List[str] = Node(4 ) __snake_case : List[str] = Node(5 ) return tree def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] if root is None: return output __snake_case : Optional[int] = deque([root] ) while process_queue: __snake_case : List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | 
None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] __snake_case : list[Sequence[Node | None]] = [] __snake_case : List[Any] = 0 __snake_case : int = height(A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(A , A ) ) __snake_case : int = 1 else: output.append(get_nodes_from_right_to_left(A , A ) ) __snake_case : Tuple = 0 return output def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing. """simple docstring""" __snake_case : Optional[int] = make_tree() print(F"""In-order Traversal: {inorder(A )}""" ) print(F"""Pre-order Traversal: {preorder(A )}""" ) print(F"""Post-order Traversal: {postorder(A )}""" , '\n' ) print(F"""Height of Tree: {height(A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(A , level=A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
61
1
"""Lazy import structure for the DistilBERT model package."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each optional backend registers its exports only when installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports, mirroring _import_structure.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backends
    # are imported only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
61
"""Basic linear algebra: a real ``Vector`` and ``Matrix`` with helpers."""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real vector backed by a list of float components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Number of components (the vector's dimension)."""
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication (returns Vector) or dot product (returns float)."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            products = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(products)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component; negative indices are allowed."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at ``pos`` to ``value``."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the 2-norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Angle between this vector and ``other`` (radians, or degrees if ``deg``)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index ``pos``."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return ``scalar * x + y`` (the classic BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of ``n`` random integers drawn from [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A real matrix stored as a list of rows, with explicit width and height."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product (returns Vector) or scalar product (returns Matrix)."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    products = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(products))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry (x, y); raises on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry (x, y) to ``value``; raises on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the submatrix with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor: (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix of random integers drawn from [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
61
1
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride 
__snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = 
TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and 
output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, 
self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 
'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , 
dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) 
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
'''simple docstring'''
# NOTE(review): obfuscation has collapsed every module constant onto the single
# name __A and every local onto __snake_case, while later reads still use the
# original names (e.g. mock_stdout, context_en) -- code left byte-identical.
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available

if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification

__A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__A = '''main'''
# Default branch name
__A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
__A = '''aaaaaaa'''
# This commit does not exist, so we should 404.
__A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
__A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''


@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE() -> List[Any]:
    """English greeting context manager: prints on enter and on exit."""
    print('Welcome!')
    yield
    print('Bye!')


@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE() -> Optional[Any]:
    """French greeting context manager: prints on enter and on exit."""
    print('Bonjour!')
    yield
    print('Au revoir!')


class a_(unittest.TestCase):
    # Smoke test: the transformers package imported above is resolvable.
    def SCREAMING_SNAKE_CASE__(self) -> int:
        """Assert the transformers module spec exists and is discoverable."""
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None


class a_(unittest.TestCase):
    # Tests for the ContextManagers helper; stdout is captured via mock.patch.
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__(self, __a) -> int:
        """ContextManagers([]) wraps nothing: output is the bare print."""
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__(self, __a) -> List[str]:
        """One manager: the English greeting wraps the print output."""
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__(self, __a) -> Tuple:
        """Two managers nest in list order: French outside, English inside."""
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        """find_labels on PyTorch Bert heads returns their label arg names."""
        self.assertEqual(find_labels(__a), ['labels'])
        self.assertEqual(find_labels(__a), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a), ['start_positions', 'end_positions'])

        class a_(UpperCamelCase_):
            pass

        # Subclasses inherit the label names of their base model class.
        self.assertEqual(find_labels(__a), ['labels'])

    @require_tf
    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        """find_labels on TensorFlow Bert heads returns their label arg names."""
        self.assertEqual(find_labels(__a), ['labels'])
        self.assertEqual(find_labels(__a), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a), ['start_positions', 'end_positions'])

        class a_(UpperCamelCase_):
            pass

        self.assertEqual(find_labels(__a), ['labels'])

    @require_flax
    def SCREAMING_SNAKE_CASE__(self) -> int:
        """find_labels on Flax Bert heads: Flax models declare no label args."""
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])

        class a_(UpperCamelCase_):
            pass

        self.assertEqual(find_labels(__a), [])
61
1
'''simple docstring''' from math import asin, atan, cos, radians, sin, sqrt, tan __A = 637_8137.0 __A = 635_6752.31_4245 __A = 6_3_7_8_1_3_7 def _SCREAMING_SNAKE_CASE ( A : float , A : float , A : float , A : float ) -> float: """simple docstring""" __snake_case : List[Any] = (AXIS_A - AXIS_B) / AXIS_A __snake_case : Dict = atan((1 - flattening) * tan(radians(A ) ) ) __snake_case : str = atan((1 - flattening) * tan(radians(A ) ) ) __snake_case : List[Any] = radians(A ) __snake_case : str = radians(A ) # Equation __snake_case : str = sin((phi_a - phi_a) / 2 ) __snake_case : Union[str, Any] = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda __snake_case : str = sqrt(sin_sq_phi + (cos(A ) * cos(A ) * sin_sq_lambda) ) return 2 * RADIUS * asin(A ) if __name__ == "__main__": import doctest doctest.testmod()
61
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
1
'''simple docstring'''
# Generic framework-agnostic utilities (tensor predicates, ModelOutput, enums,
# shape helpers) supporting NumPy, PyTorch, TensorFlow and JAX.
#
# NOTE(review): obfuscation collapsed every local assignment onto the single
# name __snake_case while later reads still use the original names (e.g.
# `cached`, `val`, `framework`, `signature`); code is left byte-identical.
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy

if is_flax_available():
    import jax.numpy as jnp


class a_(UpperCamelCase_):
    # Descriptor that caches the wrapped getter's result on the instance
    # (a cached_property: compute once, then read the stored value).
    def __get__(self, __a, __a=None) -> int:
        """Return the cached value, computing and storing it on first access."""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute')
        # Attribute name under which the computed value is stashed.
        __snake_case : List[Any] = '__cached_' + self.fget.__name__
        __snake_case : Tuple = getattr(__a, __a, __a)
        if cached is None:
            __snake_case : List[Any] = self.fget(__a)
            setattr(__a, __a, __a)
        return cached


def _SCREAMING_SNAKE_CASE(A: Optional[int]) -> Optional[int]:
    """Parse a truthy/falsy string ('y'/'true'/'1' -> 1, 'n'/'false'/'0' -> 0)."""
    __snake_case : Dict = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""")


def _SCREAMING_SNAKE_CASE(A: str) -> Union[str, Any]:
    """True if the argument is a tensor of any supported framework (or numpy)."""
    if is_torch_fx_proxy(A):
        return True
    if is_torch_available():
        import torch

        if isinstance(A, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(A, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(A, (jnp.ndarray, Tracer)):
            return True
    return isinstance(A, np.ndarray)


def _SCREAMING_SNAKE_CASE(A: Union[str, Any]) -> Dict:
    """True if the argument is a numpy ndarray."""
    return isinstance(A, np.ndarray)


def _SCREAMING_SNAKE_CASE(A: str) -> Any:
    """Public wrapper around the numpy predicate."""
    return _is_numpy(A)


def _SCREAMING_SNAKE_CASE(A: Optional[Any]) -> Tuple:
    """True if the argument is a torch.Tensor (torch assumed importable)."""
    import torch

    return isinstance(A, torch.Tensor)


def _SCREAMING_SNAKE_CASE(A: Dict) -> Union[str, Any]:
    """Safe torch-tensor predicate: False when torch is not installed."""
    return False if not is_torch_available() else _is_torch(A)


def _SCREAMING_SNAKE_CASE(A: Any) -> str:
    """True if the argument is a torch.device (torch assumed importable)."""
    import torch

    return isinstance(A, torch.device)


def _SCREAMING_SNAKE_CASE(A: Dict) -> Optional[int]:
    """Safe torch-device predicate: False when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(A)


def _SCREAMING_SNAKE_CASE(A: Optional[Any]) -> Any:
    """True if the argument is (or names) a torch.dtype."""
    import torch

    # Strings like "float32" are resolved to the torch attribute first.
    if isinstance(A, A):
        if hasattr(A, A):
            __snake_case : List[str] = getattr(A, A)
        else:
            return False
    return isinstance(A, torch.dtype)


def _SCREAMING_SNAKE_CASE(A: Optional[Any]) -> List[str]:
    """Safe torch-dtype predicate: False when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(A)


def _SCREAMING_SNAKE_CASE(A: Dict) -> Any:
    """True if the argument is a tf.Tensor (tensorflow assumed importable)."""
    import tensorflow as tf

    return isinstance(A, tf.Tensor)


def _SCREAMING_SNAKE_CASE(A: Dict) -> Optional[int]:
    """Safe TF-tensor predicate: False when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(A)


def _SCREAMING_SNAKE_CASE(A: Tuple) -> int:
    """True if the argument is a symbolic (graph-mode) TF tensor."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(A, 'is_symbolic_tensor'):
        return tf.is_symbolic_tensor(A)
    return type(A) == tf.Tensor


def _SCREAMING_SNAKE_CASE(A: List[str]) -> Dict:
    """Safe symbolic-TF-tensor predicate: False when TF is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(A)


def _SCREAMING_SNAKE_CASE(A: List[str]) -> Any:
    """True if the argument is a jax ndarray (jax assumed importable)."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(A, jnp.ndarray)


def _SCREAMING_SNAKE_CASE(A: Optional[Any]) -> Union[str, Any]:
    """Safe jax-tensor predicate: False when flax/jax is not installed."""
    return False if not is_flax_available() else _is_jax(A)


def _SCREAMING_SNAKE_CASE(A: List[Any]) -> Union[str, Any]:
    """Recursively convert tensors/arrays in a nested structure to Python objects."""
    if isinstance(A, (dict, UserDict)):
        return {k: to_py_obj(A) for k, v in obj.items()}
    elif isinstance(A, (list, tuple)):
        return [to_py_obj(A) for o in obj]
    elif is_tf_tensor(A):
        return obj.numpy().tolist()
    elif is_torch_tensor(A):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(A):
        return np.asarray(A).tolist()
    elif isinstance(A, (np.ndarray, np.number)):
        # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def _SCREAMING_SNAKE_CASE(A: List[str]) -> Any:
    """Recursively convert tensors in a nested structure to numpy arrays."""
    if isinstance(A, (dict, UserDict)):
        return {k: to_numpy(A) for k, v in obj.items()}
    elif isinstance(A, (list, tuple)):
        return np.array(A)
    elif is_tf_tensor(A):
        return obj.numpy()
    elif is_torch_tensor(A):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(A):
        return np.asarray(A)
    else:
        return obj


class a_(UpperCamelCase_):
    # ModelOutput-style container: a dataclass that also behaves like an
    # ordered, read-only mapping (keys insertable, never deletable).
    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        """Post-init hook: validate the dataclass fields and register non-None
        field values as mapping entries."""
        __snake_case : Dict = fields(self)

        # Safety and consistency checks
        if not len(__a):
            raise ValueError(F"""{self.__class__.__name__} has no fields.""")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""")

        __snake_case : Dict = getattr(self, class_fields[0].name)
        __snake_case : int = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(__a):
            if isinstance(__a, __a):
                __snake_case : Tuple = first_field.items()
                __snake_case : List[Any] = True
            else:
                try:
                    __snake_case : int = iter(__a)
                    __snake_case : Union[str, Any] = True
                except TypeError:
                    __snake_case : int = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__a):
                    if (
                        not isinstance(__a, (list, tuple))
                        or not len(__a) == 2
                        or not isinstance(element[0], __a)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            __snake_case : str = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        __snake_case : Optional[Any] = element[1]
            elif first_field is not None:
                __snake_case : str = first_field
        else:
            # Normal path: register every field whose value is not None.
            for field in class_fields:
                __snake_case : Optional[int] = getattr(self, field.name)
                if v is not None:
                    __snake_case : Tuple = v

    def __delitem__(self, *__a, **__a) -> Optional[Any]:
        """Deletion is forbidden on this container."""
        raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""")

    def SCREAMING_SNAKE_CASE__(self, *__a, **__a) -> Optional[Any]:
        """setdefault is forbidden on this container."""
        raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""")

    def SCREAMING_SNAKE_CASE__(self, *__a, **__a) -> List[str]:
        """pop is forbidden on this container."""
        raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""")

    def SCREAMING_SNAKE_CASE__(self, *__a, **__a) -> Union[str, Any]:
        """update is forbidden on this container."""
        raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""")

    def __getitem__(self, __a) -> Optional[Any]:
        """Index by string key (dict-style) or by integer position (tuple-style)."""
        if isinstance(__a, __a):
            __snake_case : Tuple = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, __a, __a) -> Optional[Any]:
        """Keep attribute writes and mapping entries in sync."""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__a, __a)
        super().__setattr__(__a, __a)

    def __setitem__(self, __a, __a) -> Dict:
        """Keep mapping writes and attributes in sync."""
        super().__setitem__(__a, __a)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__a, __a)

    def SCREAMING_SNAKE_CASE__(self) -> Tuple[Any]:
        """Return all registered values as a plain tuple, in key order."""
        return tuple(self[k] for k in self.keys())


class a_(UpperCamelCase_, UpperCamelCase_):
    # Enum base with a clearer error message for invalid values.
    @classmethod
    def SCREAMING_SNAKE_CASE__(cls, __a) -> List[str]:
        """Raise a descriptive error listing the valid enum values."""
        raise ValueError(
            F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}""")


class a_(UpperCamelCase_):
    # Padding strategies accepted by tokenizers.
    _snake_case = """longest"""
    _snake_case = """max_length"""
    _snake_case = """do_not_pad"""


class a_(UpperCamelCase_):
    # Tensor frameworks accepted by `return_tensors=` arguments.
    _snake_case = """pt"""
    _snake_case = """tf"""
    _snake_case = """np"""
    _snake_case = """jax"""


class a_:
    # Enter/exit a list of context managers as one unit (via ExitStack).
    def __init__(self, __a) -> Union[str, Any]:
        """Store the managers and prepare an ExitStack to drive them."""
        __snake_case : Union[str, Any] = context_managers
        __snake_case : Tuple = ExitStack()

    def __enter__(self) -> Any:
        """Enter every stored context manager, in list order."""
        for context_manager in self.context_managers:
            self.stack.enter_context(__a)

    def __exit__(self, *__a, **__a) -> int:
        """Unwind all entered managers in reverse order."""
        self.stack.__exit__(*__a, **__a)


def _SCREAMING_SNAKE_CASE(A: List[Any]) -> Tuple:
    """True if the model class's signature has a `return_loss` arg defaulting to True."""
    __snake_case : Tuple = infer_framework(A)
    if framework == "tf":
        __snake_case : int = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        __snake_case : List[Any] = inspect.signature(model_class.forward)  # PyTorch models
    else:
        __snake_case : Tuple = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def _SCREAMING_SNAKE_CASE(A: List[Any]) -> List[Any]:
    """Return the label argument names found in the model class's signature."""
    __snake_case : Dict = model_class.__name__
    __snake_case : Dict = infer_framework(A)
    if framework == "tf":
        __snake_case : Optional[int] = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        __snake_case : int = inspect.signature(model_class.forward)  # PyTorch models
    else:
        __snake_case : Union[str, Any] = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def _SCREAMING_SNAKE_CASE(A: MutableMapping, A: str = "", A: str = ".") -> Any:
    """Flatten a nested mapping into a single level, joining keys with a delimiter."""

    def _flatten_dict(A: int, A: Union[str, Any] = "", A: List[str] = "."):
        for k, v in d.items():
            __snake_case : Dict = str(A) + delimiter + str(A) if parent_key else k
            if v and isinstance(A, A):
                # Recurse into nested mappings.
                yield from flatten_dict(A, A, delimiter=A).items()
            else:
                yield key, v

    return dict(_flatten_dict(A, A, A))


@contextmanager
def _SCREAMING_SNAKE_CASE(A: Optional[int], A: bool = False) -> Union[str, Any]:
    """Yield a fresh temp dir when requested, otherwise the given working dir."""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def _SCREAMING_SNAKE_CASE(A: Dict, A: Optional[int] = None) -> List[Any]:
    """Framework-agnostic transpose (numpy / torch / TF / jax)."""
    if is_numpy_array(A):
        return np.transpose(A, axes=A)
    elif is_torch_tensor(A):
        return array.T if axes is None else array.permute(*A)
    elif is_tf_tensor(A):
        import tensorflow as tf

        return tf.transpose(A, perm=A)
    elif is_jax_tensor(A):
        return jnp.transpose(A, axes=A)
    else:
        raise ValueError(F"""Type not supported for transpose: {type(A )}.""")


def _SCREAMING_SNAKE_CASE(A: str, A: List[Any]) -> List[str]:
    """Framework-agnostic reshape (numpy / torch / TF / jax)."""
    if is_numpy_array(A):
        return np.reshape(A, A)
    elif is_torch_tensor(A):
        return array.reshape(*A)
    elif is_tf_tensor(A):
        import tensorflow as tf

        return tf.reshape(A, A)
    elif is_jax_tensor(A):
        return jnp.reshape(A, A)
    else:
        raise ValueError(F"""Type not supported for reshape: {type(A )}.""")


def _SCREAMING_SNAKE_CASE(A: Any, A: List[Any] = None) -> Union[str, Any]:
    """Framework-agnostic squeeze (numpy / torch / TF / jax)."""
    if is_numpy_array(A):
        return np.squeeze(A, axis=A)
    elif is_torch_tensor(A):
        return array.squeeze() if axis is None else array.squeeze(dim=A)
    elif is_tf_tensor(A):
        import tensorflow as tf

        return tf.squeeze(A, axis=A)
    elif is_jax_tensor(A):
        return jnp.squeeze(A, axis=A)
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(A )}.""")


def _SCREAMING_SNAKE_CASE(A: Dict, A: Optional[int]) -> int:
    """Framework-agnostic expand_dims (numpy / torch / TF / jax)."""
    if is_numpy_array(A):
        return np.expand_dims(A, A)
    elif is_torch_tensor(A):
        return array.unsqueeze(dim=A)
    elif is_tf_tensor(A):
        import tensorflow as tf

        return tf.expand_dims(A, axis=A)
    elif is_jax_tensor(A):
        return jnp.expand_dims(A, axis=A)
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(A )}.""")


def _SCREAMING_SNAKE_CASE(A: List[Any]) -> Optional[int]:
    """Framework-agnostic element count of a tensor/array."""
    if is_numpy_array(A):
        return np.size(A)
    elif is_torch_tensor(A):
        return array.numel()
    elif is_tf_tensor(A):
        import tensorflow as tf

        return tf.size(A)
    elif is_jax_tensor(A):
        return array.size
    else:
        # NOTE(review): message says "expand_dims" (copy-paste in the original);
        # kept byte-identical.
        raise ValueError(F"""Type not supported for expand_dims: {type(A )}.""")


def _SCREAMING_SNAKE_CASE(A: int, A: Dict) -> Optional[Any]:
    """Prefix auto_map entries with `repo_id--` unless already prefixed."""
    for key, value in auto_map.items():
        if isinstance(A, (tuple, list)):
            __snake_case : int = [F"""{repo_id}--{v}""" if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            __snake_case : Union[str, Any] = F"""{repo_id}--{value}"""
    return auto_map


def _SCREAMING_SNAKE_CASE(A: Optional[Any]) -> Tuple:
    """Infer 'tf' / 'pt' / 'flax' from a model class's MRO module names."""
    for base_class in inspect.getmro(A):
        __snake_case : Union[str, Any] = base_class.__module__
        __snake_case : str = base_class.__name__
        if module.startswith('tensorflow') or module.startswith('keras') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax') or module.startswith('jax') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # for-else: no base class matched any known framework.
        raise TypeError(F"""Could not infer framework from class {model_class}.""")
61
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : str = 1 for i in range(1 , num + 1 ): fact *= i return fact def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 0 while number > 0: __snake_case : Dict = number % 10 sum_of_digits += last_digit __snake_case : Union[str, Any] = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int: """simple docstring""" __snake_case : List[Any] = factorial(A ) __snake_case : Dict = split_and_add(A ) return result if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
61
1
'''simple docstring'''
# NOTE(review): obfuscation has collapsed every module constant onto the single
# name __A and every local onto __snake_case, while later reads still use the
# original names (e.g. mock_stdout, context_en) -- code left byte-identical.
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available

if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification

__A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__A = '''main'''
# Default branch name
__A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
__A = '''aaaaaaa'''
# This commit does not exist, so we should 404.
__A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
__A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''


@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE() -> List[Any]:
    """English greeting context manager: prints on enter and on exit."""
    print('Welcome!')
    yield
    print('Bye!')


@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE() -> Optional[Any]:
    """French greeting context manager: prints on enter and on exit."""
    print('Bonjour!')
    yield
    print('Au revoir!')


class a_(unittest.TestCase):
    # Smoke test: the transformers package imported above is resolvable.
    def SCREAMING_SNAKE_CASE__(self) -> int:
        """Assert the transformers module spec exists and is discoverable."""
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None


class a_(unittest.TestCase):
    # Tests for the ContextManagers helper; stdout is captured via mock.patch.
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__(self, __a) -> int:
        """ContextManagers([]) wraps nothing: output is the bare print."""
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__(self, __a) -> List[str]:
        """One manager: the English greeting wraps the print output."""
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__(self, __a) -> Tuple:
        """Two managers nest in list order: French outside, English inside."""
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        """find_labels on PyTorch Bert heads returns their label arg names."""
        self.assertEqual(find_labels(__a), ['labels'])
        self.assertEqual(find_labels(__a), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a), ['start_positions', 'end_positions'])

        class a_(UpperCamelCase_):
            pass

        # Subclasses inherit the label names of their base model class.
        self.assertEqual(find_labels(__a), ['labels'])

    @require_tf
    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        """find_labels on TensorFlow Bert heads returns their label arg names."""
        self.assertEqual(find_labels(__a), ['labels'])
        self.assertEqual(find_labels(__a), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a), ['start_positions', 'end_positions'])

        class a_(UpperCamelCase_):
            pass

        self.assertEqual(find_labels(__a), ['labels'])

    @require_flax
    def SCREAMING_SNAKE_CASE__(self) -> int:
        """find_labels on Flax Bert heads: Flax models declare no label args."""
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])

        class a_(UpperCamelCase_):
            pass

        self.assertEqual(find_labels(__a), [])
61
'''simple docstring'''
# Tests for ChineseCLIPImageProcessor (3-channel and 4-channel inputs).
# NOTE(review): obfuscation collapsed locals/params onto __snake_case/__a while
# later reads use the original names (size, crop_size, ...); left byte-identical.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class a_(unittest.TestCase):
    # Config holder that fabricates image-processor kwargs and random inputs.
    def __init__(self, __a, __a=7, __a=3, __a=1_8, __a=3_0, __a=4_0_0, __a=True, __a=None, __a=True,
                 __a=None, __a=True, __a=[0.48_145_466, 0.4_578_275, 0.40_821_073],
                 __a=[0.26_862_954, 0.26_130_258, 0.27_577_711], __a=True, ) -> List[Any]:
        """Store sizing/normalization defaults used by the tests below."""
        __snake_case : Tuple = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
        __snake_case : Any = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        __snake_case : Optional[int] = parent
        __snake_case : Dict = batch_size
        __snake_case : str = num_channels
        __snake_case : Optional[Any] = image_size
        __snake_case : Optional[int] = min_resolution
        __snake_case : Tuple = max_resolution
        __snake_case : Optional[int] = do_resize
        __snake_case : Optional[int] = size
        __snake_case : Union[str, Any] = do_center_crop
        __snake_case : List[Any] = crop_size
        __snake_case : int = do_normalize
        __snake_case : Optional[Any] = image_mean
        __snake_case : str = image_std
        __snake_case : Optional[Any] = do_convert_rgb

    def SCREAMING_SNAKE_CASE__(self) -> Optional[int]:
        """Return the kwargs dict used to build the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def SCREAMING_SNAKE_CASE__(self, __a=False, __a=False, __a=False) -> List[str]:
        """Build a batch of random inputs as PIL images, numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            __snake_case : Optional[int] = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        2_5_5, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uinta))
        else:
            # Varying per-image resolutions between min and max.
            __snake_case : Dict = []
            for i in range(self.batch_size):
                __snake_case ,__snake_case : Optional[Any] = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(2_5_5, size=(self.num_channels, width, height), dtype=np.uinta))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            __snake_case : int = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]

        if torchify:
            __snake_case : List[Any] = [torch.from_numpy(__a) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class a_(UpperCamelCase_, unittest.TestCase):
    # RGB (3-channel) path of ChineseCLIPImageProcessor.
    _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__(self) -> int:
        """setUp: build the shared tester with center-cropping enabled."""
        __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self, do_center_crop=__a)

    @property
    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        """The kwargs dict for constructing the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__(self) -> Dict:
        """The processor exposes every expected configuration attribute."""
        __snake_case : int = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__a, 'do_resize'))
        self.assertTrue(hasattr(__a, 'size'))
        self.assertTrue(hasattr(__a, 'do_center_crop'))
        self.assertTrue(hasattr(__a, 'center_crop'))
        self.assertTrue(hasattr(__a, 'do_normalize'))
        self.assertTrue(hasattr(__a, 'image_mean'))
        self.assertTrue(hasattr(__a, 'image_std'))
        self.assertTrue(hasattr(__a, 'do_convert_rgb'))

    def SCREAMING_SNAKE_CASE__(self) -> int:
        """from_dict honours defaults and explicit size/crop_size overrides."""
        __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 2_2_4, 'width': 2_2_4})
        self.assertEqual(image_processor.crop_size, {'height': 1_8, 'width': 1_8})

        __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4)
        self.assertEqual(image_processor.size, {'shortest_edge': 4_2})
        self.assertEqual(image_processor.crop_size, {'height': 8_4, 'width': 8_4})

    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        """Intentionally skipped (inherited batching test not applicable)."""
        pass

    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        """PIL inputs: single image and batch produce crop-sized tensors."""
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
        for image in image_inputs:
            self.assertIsInstance(__a, Image.Image)

        # Test not batched input
        __snake_case : int = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        __snake_case : List[Any] = image_processing(__a, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        """Numpy inputs: single image and batch produce crop-sized tensors."""
        __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a, numpify=__a)
        for image in image_inputs:
            self.assertIsInstance(__a, np.ndarray)

        # Test not batched input
        __snake_case : List[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        __snake_case : int = image_processing(__a, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def SCREAMING_SNAKE_CASE__(self) -> Dict:
        """Torch inputs: single image and batch produce crop-sized tensors."""
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a, torchify=__a)
        for image in image_inputs:
            self.assertIsInstance(__a, torch.Tensor)

        # Test not batched input
        __snake_case : Any = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        __snake_case : Union[str, Any] = image_processing(__a, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )


@require_torch
@require_vision
class a_(UpperCamelCase_, unittest.TestCase):
    # RGBA (4-channel) inputs converted down to 3 channels via do_convert_rgb.
    _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__(self) -> Optional[int]:
        """setUp: 4-channel tester; expects 3 channels after RGB conversion."""
        __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=__a)
        __snake_case : List[Any] = 3

    @property
    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        """The kwargs dict for constructing the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__(self) -> Dict:
        """The processor exposes every expected configuration attribute."""
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__a, 'do_resize'))
        self.assertTrue(hasattr(__a, 'size'))
        self.assertTrue(hasattr(__a, 'do_center_crop'))
        self.assertTrue(hasattr(__a, 'center_crop'))
        self.assertTrue(hasattr(__a, 'do_normalize'))
        self.assertTrue(hasattr(__a, 'image_mean'))
        self.assertTrue(hasattr(__a, 'image_std'))
        self.assertTrue(hasattr(__a, 'do_convert_rgb'))

    def SCREAMING_SNAKE_CASE__(self) -> Tuple:
        """Intentionally skipped (inherited batching test not applicable)."""
        pass

    def SCREAMING_SNAKE_CASE__(self) -> int:
        """4-channel PIL inputs come out with the expected (3) channel count."""
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a)
        for image in image_inputs:
            self.assertIsInstance(__a, Image.Image)

        # Test not batched input
        __snake_case : Optional[int] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        __snake_case : Optional[int] = image_processing(__a, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
61
1
"""CLI wrapper: compute ROUGE between a file of predictions and a file of
reference summaries, optionally saving the metrics as JSON."""
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Compute ROUGE for line-aligned prediction/target files.

    Args:
        pred_path: text file with one predicted summary per line.
        tgt_path: text file with one reference summary per line; truncated to
            the number of predictions if it is longer.
        save_path: if given, the metrics dict is written there as JSON.
        **rouge_kwargs: forwarded verbatim to ``calculate_rouge``.

    Returns:
        The metrics dict produced by ``calculate_rouge``.
    """
    # Context managers close the handles deterministically — the source left
    # two bare open() calls to the garbage collector.
    with open(pred_path, encoding="utf-8") as f:
        pred_lns = [x.strip() for x in f.readlines()]
    with open(tgt_path, encoding="utf-8") as f:
        tgt_lns = [x.strip() for x in f.readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        # NOTE(review): the indent argument was mangled in the source; None
        # matches the upstream script — confirm against utils.save_json.
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
61
"""ViT-MSN model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): both module-level names were mangled to `__A` in the source
# (the archive map clobbered the logger); canonical names restored.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class a_(PretrainedConfig):
    """Configuration class for a ViT-MSN model.

    Stores the standard ViT-style hyper-parameters. All constructor arguments
    are persisted as instance attributes — the source assigned every value to
    a single throwaway local, which left the config object empty.
    """

    # NOTE(review): attribute name was mangled; PretrainedConfig conventionally
    # expects `model_type` — confirm before relying on auto-class lookup.
    _snake_case = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        # Parameter names recovered from the right-hand sides of the original
        # body (the def line had every parameter mangled to `__a`, which is a
        # SyntaxError in Python: duplicate argument names).
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
61
1
'''simple docstring''' import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = OpenAIGPTTokenizer _snake_case = OpenAIGPTTokenizerFast _snake_case = True _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] __snake_case : Optional[int] = dict(zip(__a , range(len(__a)))) __snake_case : Tuple = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', ''] __snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w') as fp: fp.write(json.dumps(__a)) with open(self.merges_file , 'w') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" return "lower newer", "lower newer" def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : str = OpenAIGPTTokenizer(self.vocab_file , self.merges_file) __snake_case : Dict = 'lower' __snake_case : List[Any] = ['low', 'er</w>'] __snake_case : Dict = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) __snake_case : int = tokens + ['<unk>'] __snake_case : int = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a) def SCREAMING_SNAKE_CASE__ (self , __a=1_5) -> Optional[Any]: """simple docstring""" for tokenizer, 
pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): __snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(__a , **__a) # Simple input __snake_case : Optional[Any] = 'This is a simple input' __snake_case : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2'] __snake_case : Union[str, Any] = ('This is a simple input', 'This is a pair') __snake_case : Dict = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length') # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length') # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length') # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length') # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" pass @require_ftfy @require_spacy @require_tokenizers class a_ ( UpperCamelCase_ ): pass
61
"""Present-value helper: discount a cash-flow series and round to cents."""


def _SCREAMING_SNAKE_CASE(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the present value of ``cash_flows`` discounted at ``discount_rate``.

    Entry ``i`` is divided by ``(1 + discount_rate) ** i``, so the first cash
    flow (index 0) is undiscounted. The result is rounded to 2 decimal places.

    The original def line named every parameter ``A`` — a SyntaxError
    (duplicate argument) — while the body already read ``discount_rate`` and
    ``cash_flows``; the parameter names are restored from those references.

    Raises:
        ValueError: if ``discount_rate`` is negative or ``cash_flows`` is empty.
    """
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
61
1
"""Image-classification pipeline (mangled identifiers restored)."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


# NOTE(review): the decorator argument and base class were both mangled to
# `UpperCamelCase_`; PIPELINE_INIT_ARGS / Pipeline are imported above and
# otherwise unused, so they are restored here. The four pipeline hooks below
# all shared one mangled name in the source (so only the last survived);
# canonical Pipeline hook names restored — confirm against the base class.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class a_(Pipeline):
    """Predict the class of an image with an image-classification model."""

    def __init__(self, *args, **kwargs):
        # Duplicate `__a` params in the source were a SyntaxError.
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        # Only `top_k` is configurable; it applies at the postprocess stage.
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == 'pt':
            probs = model_outputs.logits.softmax(-1)[0]
            # Bug fix: the source unpacked both values into one mangled name
            # and then referenced the lost `scores`/`ids` (NameError).
            scores, ids = probs.topk(top_k)
        elif self.framework == 'tf':
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        # NOTE(review): source read `config.idalabel` (digit-stripped);
        # PretrainedConfig's mapping is `id2label` — restored, confirm.
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
61
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, 
DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
1
"""Project-Euler-style helper: sum of 2*a*((a-1)//2) for a in [3, n]."""


def solution(n: int = 1000) -> int:
    """Return the sum over ``a`` in ``[3, n]`` of ``2 * a * ((a - 1) // 2)``.

    Two mangled names restored: the source's function name was not the
    ``solution`` its own ``__main__`` guard calls (NameError), and the body
    read ``n`` while the parameter was named ``A`` (NameError).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
61
"""tests directory-specific settings - this file is run automatically
by pytest before any tests are run."""
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fix: the source assigned this path to `__A` but inserted the undefined
# name `git_repo_path` into sys.path (NameError at collection time).
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


# NOTE(review): both hooks were mangled to the same name in the source, so the
# second shadowed the first and pytest could discover neither. Canonical hook
# names restored — they match the *_shared/*_main helpers each one calls.
def pytest_addoption(parser):
    """pytest hook: delegate extra CLI options to the shared diffusers helper."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: optionally emit detailed test reports (--make-reports)."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    # Bug fix: the source's parameter was `A` while the body read the
    # undefined `terminalreporter` (NameError whenever the hook fired).
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
61
1
"""Deprecation shim: FlavaFeatureExtractor is an alias of FlavaImageProcessor."""
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


# NOTE(review): base class was mangled to `UpperCamelCase_`; the only imported
# candidate is FlavaImageProcessor, which matches the deprecation message.
class a_(FlavaImageProcessor):
    """Behaves exactly like FlavaImageProcessor but warns on construction."""

    def __init__(self, *args, **kwargs) -> None:
        # Bug fix: the source passed the positional-args tuple as the warning
        # category (warnings.warn requires a Warning subclass → TypeError);
        # FutureWarning restored. Duplicate `__a` params were a SyntaxError.
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
61
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
1
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __A = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( A : torch.nn.Module , A : BnbQuantizationConfig , A : Union[str, os.PathLike] = None , A : Optional[Dict[str, Union[int, str, torch.device]]] = None , A : Optional[List[str]] = None , A : Optional[Dict[Union[int, str], Union[int, str]]] = None , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , ) -> str: """simple docstring""" __snake_case : List[Any] = bnb_quantization_config.load_in_abit __snake_case : Dict = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' 
) __snake_case : str = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: __snake_case : str = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: __snake_case : List[Any] = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) __snake_case : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: __snake_case : Optional[Any] = [] __snake_case : Dict = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft __snake_case : Optional[int] = load_in_abit __snake_case : Union[str, Any] = load_in_abit __snake_case : List[str] = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) __snake_case : Dict = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype __snake_case : int = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: __snake_case : List[str] = name.replace('.weight' , '' ).replace('.bias' , '' ) __snake_case : Tuple = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" 'We move the model to cuda.' 
) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): __snake_case : List[str] = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) __snake_case : str = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): __snake_case : Optional[int] = True __snake_case : Tuple = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _SCREAMING_SNAKE_CASE ( A : List[str] , A : Any , A : Optional[Any]=None , A : List[Any]=None , A : Optional[int]=None ) -> Any: """simple docstring""" if device_map is None: if torch.cuda.is_available(): __snake_case : Optional[Any] = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' 
) __snake_case : int = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) __snake_case : Dict = {} __snake_case : Optional[int] = special_dtypes __snake_case : Dict = no_split_module_classes __snake_case : Dict = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": __snake_case : int = get_balanced_memory( A , low_zero=(device_map == 'balanced_low_0') , max_memory=A , **A , ) __snake_case : List[str] = max_memory __snake_case : str = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu __snake_case : int = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules __snake_case : List[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _SCREAMING_SNAKE_CASE ( A : int , A : Optional[Any] , A : str=None , A : Union[str, Any]=None ) -> str: """simple docstring""" if modules_to_not_convert is None: __snake_case : int = [] __snake_case ,__snake_case : Optional[int] = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _SCREAMING_SNAKE_CASE ( A : List[str] , A : List[str] , A : Tuple=None , A : Optional[Any]=None , ) -> Optional[Any]: """simple docstring""" __snake_case : Dict = False for name, module in model.named_children(): if current_key_name is None: __snake_case : Optional[int] = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` __snake_case : Any = '.'.join(A ) __snake_case : Union[str, Any] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: __snake_case : List[Any] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: __snake_case : Dict = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: __snake_case : Union[str, Any] = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) __snake_case : Dict = module.weight.data if module.bias is not None: __snake_case : List[str] = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) __snake_case : List[str] = True if len(list(module.children() ) ) > 0: __snake_case ,__snake_case : List[Any] = _replace_with_bnb_layers( A , A , A , A ) __snake_case : int = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _SCREAMING_SNAKE_CASE ( A : str ) -> Optional[int]: """simple docstring""" # Create a copy of the model with init_empty_weights(): __snake_case : Optional[int] = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` __snake_case : List[str] = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): __snake_case : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __snake_case : Any = sum(A , [] ) __snake_case : int = len(A ) > 0 # Check if it is a base model __snake_case : Any = False if hasattr(A , 'base_model_prefix' ): __snake_case : Tuple = not hasattr(A , 
model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __snake_case : Tuple = list(model.named_children() ) __snake_case : str = [list_modules[-1][0]] # add last module together with tied weights __snake_case : str = set(A ) - set(A ) __snake_case : Dict = list(set(A ) ) + list(A ) # remove ".weight" from the keys __snake_case : Optional[Any] = ['.weight', '.bias'] __snake_case : Any = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __snake_case : str = name.replace(A , '' ) filtered_module_names.append(A ) return filtered_module_names def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> List[Any]: """simple docstring""" for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _SCREAMING_SNAKE_CASE ( A : nn.Module ) -> Optional[Any]: """simple docstring""" return next(parameter.parameters() ).device def _SCREAMING_SNAKE_CASE ( A : List[str] , A : List[Any] , A : Tuple , A : int , A : List[Any] , A : Optional[Any] , A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) __snake_case : List[str] = param_name __snake_case : List[str] = model if "." in tensor_name: __snake_case : Optional[Any] = tensor_name.split('.' 
) for split in splits[:-1]: __snake_case : List[Any] = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) __snake_case : Union[str, Any] = new_module __snake_case : List[Any] = splits[-1] # offload weights __snake_case : str = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('weight' , 'SCB' ) , A , index=A ) set_module_tensor_to_device(A , A , 'meta' , dtype=A , value=torch.empty(*param.size() ) )
61
"""Decorator that runs accelerate's `_hf_hook.pre_forward` before a method,
gated on the installed accelerate version."""
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def _SCREAMING_SNAKE_CASE(method):
    """Wrap ``method`` so an attached accelerate hook's pre_forward runs first.

    Returns ``method`` unchanged when accelerate is unavailable or older than
    0.17.0 (the hook protocol does not apply there).
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    # Bug fix: the source discarded the computed base_version and compared
    # `version.parse(method)` — parsing the decorated function itself — so the
    # gate never actually keyed on the installed accelerate version.
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    from functools import wraps

    @wraps(method)  # preserve the wrapped method's name and docstring
    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
61
1
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __A = logging.get_logger(__name__) __A = OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', '''LevitFeatureExtractor'''), ('''maskformer''', 
'''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), ('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) __A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def _SCREAMING_SNAKE_CASE ( A : str ) -> Union[str, Any]: """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: 
__snake_case : Tuple = model_type_to_module_name(A ) __snake_case : Tuple = importlib.import_module(F""".{module_name}""" , 'transformers.models' ) try: return getattr(A , A ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(A , '__name__' , A ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __snake_case : List[str] = importlib.import_module('transformers' ) if hasattr(A , A ): return getattr(A , A ) return None def _SCREAMING_SNAKE_CASE ( A : Union[str, os.PathLike] , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , **A : Optional[Any] , ) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = get_file_from_repo( A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , ) if resolved_config_file is None: logger.info( 'Could not locate the feature extractor configuration file, will try to use the model config instead.' 
) return {} with open(A , encoding='utf-8' ) as reader: return json.load(A ) class a_ : def __init__(self) -> Dict: """simple docstring""" raise EnvironmentError( 'AutoFeatureExtractor is designed to be instantiated ' 'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.') @classmethod @replace_list_option_in_docstrings(__a) def SCREAMING_SNAKE_CASE__ (cls , __a , **__a) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = kwargs.pop('config' , __a) __snake_case : Tuple = kwargs.pop('trust_remote_code' , __a) __snake_case : List[Any] = True __snake_case ,__snake_case : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(__a , **__a) __snake_case : Optional[int] = config_dict.get('feature_extractor_type' , __a) __snake_case : str = None if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}): __snake_case : Union[str, Any] = config_dict['auto_map']['AutoFeatureExtractor'] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(__a , __a): __snake_case : Optional[int] = AutoConfig.from_pretrained(__a , **__a) # It could be in `config.feature_extractor_type`` __snake_case : int = getattr(__a , 'feature_extractor_type' , __a) if hasattr(__a , 'auto_map') and "AutoFeatureExtractor" in config.auto_map: __snake_case : Tuple = config.auto_map['AutoFeatureExtractor'] if feature_extractor_class is not None: __snake_case : List[Any] = feature_extractor_class_from_name(__a) __snake_case : Optional[Any] = feature_extractor_auto_map is not None __snake_case : Union[str, Any] = feature_extractor_class is not None or type(__a) in FEATURE_EXTRACTOR_MAPPING __snake_case : str = resolve_trust_remote_code( __a , __a , __a , __a) if has_remote_code and trust_remote_code: __snake_case : List[str] = get_class_from_dynamic_module( __a , __a , **__a) __snake_case : Any = kwargs.pop('code_revision' , __a) if os.path.isdir(__a): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(__a , **__a) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(__a , **__a) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(__a) in FEATURE_EXTRACTOR_MAPPING: __snake_case : Optional[Any] = FEATURE_EXTRACTOR_MAPPING[type(__a)] return feature_extractor_class.from_dict(__a , **__a) raise ValueError( F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """ F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}""") @staticmethod def SCREAMING_SNAKE_CASE__ (__a , __a) -> Dict: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(__a , __a)
61
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
61
1
'''simple docstring''' import numpy as np import qiskit def _SCREAMING_SNAKE_CASE ( A : int = 8 , A : int | None = None ) -> str: """simple docstring""" __snake_case : str = np.random.default_rng(seed=A ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. __snake_case : Optional[int] = 6 * key_len # Measurement basis for Alice's qubits. __snake_case : str = rng.integers(2 , size=A ) # The set of states Alice will prepare. __snake_case : Dict = rng.integers(2 , size=A ) # Measurement basis for Bob's qubits. __snake_case : Tuple = rng.integers(2 , size=A ) # Quantum Circuit to simulate BB84 __snake_case : List[str] = qiskit.QuantumCircuit(A , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(A ): if alice_state[index] == 1: bbaa_circ.x(A ) if alice_basis[index] == 1: bbaa_circ.h(A ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(A ): if bob_basis[index] == 1: bbaa_circ.h(A ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. __snake_case : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. __snake_case : List[Any] = qiskit.execute(A , A , shots=1 , seed_simulator=A ) # Returns the result of measurement. __snake_case : List[Any] = job.result().get_counts(A ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. __snake_case : Optional[Any] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( A , A , A ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. 
__snake_case : Any = gen_key[:key_len] if len(A ) >= key_len else gen_key.ljust(A , '0' ) return key if __name__ == "__main__": print(f'''The generated key is : {bbaa(8, seed=0)}''') from doctest import testmod testmod()
61
'''simple docstring''' import math class a_ : def __init__(self , __a=0) -> Any: # a graph with Node 0,1,...,N-1 """simple docstring""" __snake_case : List[str] = n __snake_case : Tuple = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # adjacency matrix for weight __snake_case : Union[str, Any] = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = w def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
61
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : 
List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
1
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __A = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: __A = json.load(f) @require_torch class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" return FSMTTokenizer.from_pretrained(__a) def SCREAMING_SNAKE_CASE__ (self , __a) -> str: """simple docstring""" __snake_case : str = FSMTForConditionalGeneration.from_pretrained(__a).to(__a) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ]) @slow def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[str]: """simple docstring""" __snake_case : str = F"""facebook/wmt19-{pair}""" __snake_case : int = self.get_tokenizer(__a) __snake_case : Tuple = self.get_model(__a) __snake_case : Tuple = bleu_data[pair]['src'] __snake_case : Union[str, Any] = bleu_data[pair]['tgt'] __snake_case : List[str] = tokenizer(__a , return_tensors='pt' , truncation=__a , padding='longest').to(__a) __snake_case : Dict = model.generate( input_ids=batch.input_ids , num_beams=8 , ) __snake_case : Any = tokenizer.batch_decode( __a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a) __snake_case : Tuple = calculate_bleu(__a , __a) print(__a) self.assertGreaterEqual(scores['bleu'] , __a)
61
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
1
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() 
torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() 
torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { '''post_extract_proj''': '''feature_projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.upsample.0''': '''encoder.upsample.projection''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Tuple , A : Union[str, Any] , A : List[str] , A : Any ) -> Any: """simple docstring""" for attribute in key.split('.' ): __snake_case : Optional[int] = getattr(A , A ) if weight_type is not None: __snake_case : Any = getattr(A , A ).shape else: __snake_case : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : Optional[Any] = value elif weight_type == "weight_g": __snake_case : str = value elif weight_type == "weight_v": __snake_case : List[Any] = value elif weight_type == "bias": __snake_case : List[str] = value else: __snake_case : List[Any] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : List[str] , A : Dict ) -> int: """simple docstring""" __snake_case : Optional[Any] = [] __snake_case : int = fairseq_model.state_dict() __snake_case : Optional[int] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == 'group' , ) __snake_case : List[Any] = True else: for key, mapped_key in MAPPING.items(): __snake_case : Tuple = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __snake_case : List[str] = True if "*" in mapped_key: __snake_case : int = name.split(A )[0].split('.' 
)[-2] __snake_case : List[str] = mapped_key.replace('*' , A ) if "weight_g" in name: __snake_case : str = 'weight_g' elif "weight_v" in name: __snake_case : Tuple = 'weight_v' elif "weight" in name: __snake_case : Optional[int] = 'weight' elif "bias" in name: __snake_case : str = 'bias' else: __snake_case : List[str] = None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Optional[int] , A : List[str] , A : int , A : int ) -> Dict: """simple docstring""" __snake_case : Tuple = full_name.split('conv_layers.' )[-1] __snake_case : Optional[int] = name.split('.' ) __snake_case : str = int(items[0] ) __snake_case : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __snake_case : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A ) def _SCREAMING_SNAKE_CASE ( A : Dict , A : int ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[Any] = SEWConfig() if is_finetuned: __snake_case : List[str] = model.wav_encoder.wav_model.cfg else: __snake_case : Dict = model.cfg __snake_case : Dict = fs_config.conv_bias __snake_case : Tuple = eval(fs_config.conv_feature_layers ) __snake_case : Optional[Any] = [x[0] for x in conv_layers] __snake_case : List[Any] = [x[1] for x in conv_layers] __snake_case : str = [x[2] for x in conv_layers] __snake_case : Tuple = 'gelu' __snake_case : Union[str, Any] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group' __snake_case : List[Any] = 0.0 __snake_case : Tuple = fs_config.activation_fn.name __snake_case : List[Any] = fs_config.encoder_embed_dim __snake_case : Tuple = 0.02 __snake_case : List[Any] = fs_config.encoder_ffn_embed_dim __snake_case : Optional[Any] = 1e-5 __snake_case : Optional[Any] = fs_config.encoder_layerdrop __snake_case : Optional[Any] = fs_config.encoder_attention_heads __snake_case : Union[str, Any] = fs_config.conv_pos_groups __snake_case : Any = fs_config.conv_pos __snake_case : Any = len(A ) __snake_case : str = fs_config.encoder_layers __snake_case : Optional[int] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __snake_case : int = model.cfg __snake_case : Dict = fs_config.final_dropout __snake_case : List[str] = 
fs_config.layerdrop __snake_case : Union[str, Any] = fs_config.activation_dropout __snake_case : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __snake_case : Tuple = fs_config.attention_dropout __snake_case : Tuple = fs_config.dropout_input __snake_case : Optional[Any] = fs_config.dropout __snake_case : int = fs_config.mask_channel_length __snake_case : Optional[Any] = fs_config.mask_channel_prob __snake_case : int = fs_config.mask_length __snake_case : Dict = fs_config.mask_prob __snake_case : Tuple = 'Wav2Vec2FeatureExtractor' __snake_case : List[str] = 'Wav2Vec2CTCTokenizer' return config @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : int , A : List[str] , A : List[str]=None , A : List[Any]=None , A : Optional[int]=True ) -> List[Any]: """simple docstring""" if is_finetuned: __snake_case ,__snake_case ,__snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: __snake_case ,__snake_case ,__snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __snake_case : List[str] = SEWConfig.from_pretrained(A ) else: __snake_case : Optional[Any] = convert_config(model[0] , A ) __snake_case : Optional[int] = model[0].eval() __snake_case : Tuple = True if config.feat_extract_norm == 'layer' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A , return_attention_mask=A , ) if is_finetuned: if dict_path: __snake_case : Union[str, Any] = Dictionary.load(A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : str = target_dict.pad_index __snake_case : Union[str, Any] = target_dict.bos_index __snake_case : List[str] = target_dict.pad_index __snake_case : Tuple = target_dict.bos_index __snake_case : List[str] = target_dict.eos_index __snake_case : 
Union[str, Any] = len(target_dict.symbols ) __snake_case : int = os.path.join(A , 'vocab.json' ) if not os.path.isdir(A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A ) ) return os.makedirs(A , exist_ok=A ) with open(A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , A ) __snake_case : List[str] = WavaVecaCTCTokenizer( A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A , ) __snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=A , tokenizer=A ) processor.save_pretrained(A ) __snake_case : Optional[int] = SEWForCTC(A ) else: __snake_case : Any = SEWModel(A ) feature_extractor.save_pretrained(A ) recursively_load_weights(A , A , A ) hf_model.save_pretrained(A ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __A = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
61
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": 
accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) 
# Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
1
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Any , A : Tuple ) -> Any: """simple docstring""" __snake_case : Dict = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, oder?', } # BLUE scores as follows: # "pair": [fairseq, transformers] __snake_case : int = { 'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'], 'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'], 'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'], 'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'], } __snake_case : int = F"""{src_lang}-{tgt_lang}""" __snake_case : List[str] = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). 
The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(A , exist_ok=A ) __snake_case : Union[str, Any] = os.path.join(A , 'README.md' ) print(F"""Generating {path}""" ) with open(A , 'w' , encoding='utf-8' ) as f: f.write(A ) # make sure we are under the root of the project __A = Path(__file__).resolve().parent.parent.parent __A = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __A , __A , __A = model_name.split('''-''') __A = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
61
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
1
'''simple docstring''' __A = frozenset( [ '''prompt''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) __A = frozenset(['''prompt''', '''negative_prompt''']) __A = frozenset([]) __A = frozenset(['''image''']) __A = frozenset( [ '''image''', '''height''', '''width''', '''guidance_scale''', ] ) __A = frozenset(['''image''']) __A = frozenset( [ '''prompt''', '''image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) __A = frozenset(['''prompt''', '''image''', '''negative_prompt''']) __A = frozenset( [ # Text guided image variation with an image mask '''prompt''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) __A = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt''']) __A = frozenset( [ # image variation with an image mask '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) __A = frozenset(['''image''', '''mask_image''']) __A = frozenset( [ '''example_image''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) __A = frozenset(['''example_image''', '''image''', '''mask_image''']) __A = frozenset(['''class_labels''']) __A = frozenset(['''class_labels''']) __A = frozenset(['''batch_size''']) __A = frozenset([]) __A = frozenset(['''batch_size''']) __A = frozenset([]) __A = frozenset( [ '''prompt''', '''audio_length_in_s''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) __A = frozenset(['''prompt''', '''negative_prompt''']) __A = frozenset(['''input_tokens''']) __A = frozenset(['''input_tokens'''])
61
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." 
__snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( 
names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
1
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion''' ) __A = None __A = { '''7B''': 1_1_0_0_8, '''13B''': 1_3_8_2_4, '''30B''': 1_7_9_2_0, '''65B''': 2_2_0_1_6, '''70B''': 2_8_6_7_2, } __A = { '''7B''': 1, '''7Bf''': 1, '''13B''': 2, '''13Bf''': 2, '''30B''': 4, '''65B''': 8, '''70B''': 8, '''70Bf''': 8, } def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Tuple=1 , A : Union[str, Any]=2_56 ) -> int: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _SCREAMING_SNAKE_CASE ( A : int ) -> List[str]: """simple docstring""" with open(A , 'r' ) as f: return json.load(A ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Any ) -> str: """simple docstring""" with open(A , 'w' ) as f: json.dump(A , A ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : Union[str, Any] , A : Union[str, Any] , A : Optional[int]=True ) -> List[str]: """simple docstring""" os.makedirs(A , exist_ok=A ) __snake_case : str = os.path.join(A , 'tmp' ) os.makedirs(A , exist_ok=A ) __snake_case : int = read_json(os.path.join(A , 'params.json' ) ) __snake_case : int = NUM_SHARDS[model_size] __snake_case : Any = params['n_layers'] __snake_case : Any = params['n_heads'] __snake_case : int = n_heads // num_shards __snake_case : Optional[int] = params['dim'] __snake_case : str = dim // n_heads __snake_case : Dict = 10000.0 __snake_case : List[Any] = 1.0 / (base ** (torch.arange(0 , A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: __snake_case : Tuple = params['n_kv_heads'] # for GQA / MQA __snake_case : Any = n_heads_per_shard // 
num_key_value_heads __snake_case : Any = dim // num_key_value_heads else: # compatibility with other checkpoints __snake_case : Any = n_heads __snake_case : Union[str, Any] = n_heads_per_shard __snake_case : Union[str, Any] = dim # permute for sliced rotary def permute(A : Tuple , A : List[str]=n_heads , A : Dict=dim , A : Union[str, Any]=dim ): return w.view(A , dima // n_heads // 2 , 2 , A ).transpose(1 , 2 ).reshape(A , A ) print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) __snake_case : str = torch.load(os.path.join(A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded __snake_case : int = [ torch.load(os.path.join(A , F"""consolidated.{i:02d}.pth""" ) , map_location='cpu' ) for i in range(A ) ] __snake_case : Dict = 0 __snake_case : Any = {'weight_map': {}} for layer_i in range(A ): __snake_case : str = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded __snake_case : List[str] = { F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wq.weight"""] ), F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wk.weight"""] ), F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""], F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""], F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""], F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""], F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""], F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""], 
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. __snake_case : Optional[int] = { F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.attention_norm.weight""" ].clone(), F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } __snake_case : Optional[int] = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) ) __snake_case : Any = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view( A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) , A , A , A , ) __snake_case : Tuple = torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view( A , A , A ) for i in range(A ) ] , dim=0 , ).reshape(A , A ) __snake_case : Tuple = torch.cat( [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(A )] , dim=1 ) __snake_case : Union[str, Any] = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(A )] , dim=0 ) __snake_case : int = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(A )] , dim=1 ) __snake_case : Any = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(A )] , dim=0 ) __snake_case : str = inv_freq for k, v in state_dict.items(): __snake_case : Union[str, Any] = filename param_count += v.numel() torch.save(A , os.path.join(A , A ) ) __snake_case : Optional[int] = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size 
== "7B": # Unsharded __snake_case : List[str] = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: __snake_case : str = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( [loaded[i]['tok_embeddings.weight'] for i in range(A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(A )] , dim=0 ), } for k, v in state_dict.items(): __snake_case : Union[str, Any] = filename param_count += v.numel() torch.save(A , os.path.join(A , A ) ) # Write configs __snake_case : Optional[int] = {'total_size': param_count * 2} write_json(A , os.path.join(A , 'pytorch_model.bin.index.json' ) ) __snake_case : List[str] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 __snake_case : Tuple = params['multiple_of'] if 'multiple_of' in params else 2_56 __snake_case : Optional[Any] = LlamaConfig( hidden_size=A , intermediate_size=compute_intermediate_size(A , A , A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=A , ) config.save_pretrained(A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) __snake_case : List[str] = LlamaForCausalLM.from_pretrained(A , torch_dtype=torch.floataa , low_cpu_mem_usage=A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' 
) model.save_pretrained(A , safe_serialization=A ) shutil.rmtree(A ) def _SCREAMING_SNAKE_CASE ( A : Dict , A : Dict ) -> Dict: """simple docstring""" # Initialize the tokenizer based on the `spm` model __snake_case : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) __snake_case : str = tokenizer_class(A ) tokenizer.save_pretrained(A ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=A , help='Whether or not to save using `safetensors`.' ) __snake_case : Tuple = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) __snake_case : Optional[int] = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , A ) if __name__ == "__main__": main()
61
"""Tests for the TensorFlow EfficientFormer models.

NOTE(review): this module appears to have passed through an identifier-mangling
step: every method is named ``SCREAMING_SNAKE_CASE__``, every parameter ``__a``,
and every assignment target ``__snake_case``.  As a result it cannot run as-is:
``__init__`` declares duplicate ``__a`` parameters (a SyntaxError), the
``self.*`` attributes read later are never assigned, several names
(``TFEfficientFormerModelTester``, ``UpperCamelCase_``, ``Dict``/``Optional``/
``Union``/``Any``/``Tuple``) are referenced but never defined, and tuple
targets carry annotations (``a ,b : T = ...``, also a SyntaxError).  The code
is left byte-identical below; comments only describe the apparent intent and
flag the defects.
"""
import inspect
import unittest
from typing import List

import numpy as np

from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFEfficientFormerForImageClassification,
        TFEfficientFormerForImageClassificationWithTeacher,
        TFEfficientFormerModel,
    )
    from transformers.models.efficientformer.modeling_tf_efficientformer import (
        TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_vision_available():
    from PIL import Image

    from transformers import EfficientFormerImageProcessor


class a_:
    # Model-tester helper: builds a small config and random inputs for the TF
    # EfficientFormer tests (presumably the original `TFEfficientFormerModelTester`;
    # the test class below instantiates that name -- TODO confirm).
    # NOTE(review): duplicate `__a` parameter names make this a SyntaxError, and the
    # `__snake_case` locals were presumably `self.<param>` assignments originally.
    def __init__(self, __a, __a=1_3, __a=6_4, __a=2, __a=3, __a=3, __a=True, __a=True, __a=1_2_8, __a=[1_6, 3_2, 6_4, 1_2_8], __a=7, __a=4, __a=3_7, __a="gelu", __a=0.1, __a=0.1, __a=1_0, __a=0.02, __a=2, __a=1, __a=1_2_8, __a=[2, 2, 2, 2], __a=2, __a=2, ) -> str:
        """Store the tester hyper-parameters (batch size, image size, config fields)."""
        __snake_case : Optional[Any] = parent
        __snake_case : Optional[int] = batch_size
        __snake_case : Optional[Any] = image_size
        __snake_case : Optional[int] = patch_size
        __snake_case : Optional[Any] = num_channels
        __snake_case : Optional[Any] = is_training
        __snake_case : Tuple = use_labels
        __snake_case : Optional[int] = hidden_size
        __snake_case : Any = num_hidden_layers
        __snake_case : List[str] = num_attention_heads
        __snake_case : Tuple = intermediate_size
        __snake_case : List[str] = hidden_act
        __snake_case : Dict = hidden_dropout_prob
        __snake_case : Any = attention_probs_dropout_prob
        __snake_case : Dict = type_sequence_label_size
        __snake_case : str = initializer_range
        __snake_case : int = encoder_stride
        __snake_case : List[str] = num_attention_outputs
        __snake_case : Optional[Any] = embed_dim
        # seq_length is the embedding dim plus one (presumably a class token) -- TODO confirm
        __snake_case : Optional[Any] = embed_dim + 1
        __snake_case : List[str] = resolution
        __snake_case : Optional[int] = depths
        __snake_case : List[Any] = hidden_sizes
        __snake_case : List[str] = dim
        __snake_case : Union[str, Any] = mlp_expansion_ratio

    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        """Build (config, pixel_values, labels) random test inputs."""
        __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __snake_case : List[str] = None
        if self.use_labels:
            __snake_case : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size)
        __snake_case : Tuple = self.get_config()
        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        """Build a small EfficientFormerConfig from the stored hyper-parameters.

        NOTE(review): `is_decoder=__a` references an undefined name.
        """
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )

    def SCREAMING_SNAKE_CASE__(self, __a, __a, __a) -> Optional[int]:
        """Run the base model and check the last_hidden_state shape."""
        __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a)
        __snake_case : int = model(__a, training=__a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def SCREAMING_SNAKE_CASE__(self, __a, __a, __a) -> Tuple:
        """Run the classification head and check logits shape (RGB then greyscale)."""
        __snake_case : Dict = self.type_sequence_label_size
        __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
        __snake_case : Optional[int] = model(__a, labels=__a, training=__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        __snake_case : List[Any] = 1
        __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a)
        __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __snake_case : str = model(__a, labels=__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def SCREAMING_SNAKE_CASE__(self) -> Any:
        """Return (config, inputs_dict) for the common-test mixin.

        NOTE(review): `__snake_case ,__snake_case ,__snake_case : ... = config_and_inputs`
        annotates a tuple target, which is a SyntaxError.
        """
        __snake_case : Union[str, Any] = self.prepare_config_and_inputs()
        __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs
        __snake_case : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class a_(UpperCamelCase_, UpperCamelCase_, unittest.TestCase):
    # Common-test suite for the TF EfficientFormer models.
    # NOTE(review): `UpperCamelCase_` is undefined (presumably TFModelTesterMixin and
    # PipelineTesterMixin), and listing it twice would be a duplicate-base TypeError.
    # The `_snake_case` class attributes shadow each other; they were presumably
    # all_model_classes, pipeline_model_mapping, and five boolean capability flags.
    _snake_case = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    _snake_case = (
        {
            """feature-extraction""": TFEfficientFormerModel,
            """image-classification""": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def SCREAMING_SNAKE_CASE__(self) -> Dict:
        """setUp: create the model tester and the config tester.

        NOTE(review): `config_class=__a` / `has_text_modality=__a` reference an
        undefined name, and the results are bound to throwaway locals instead of
        `self.model_tester` / `self.config_tester`.
        """
        __snake_case : Dict = TFEfficientFormerModelTester(self)
        __snake_case : List[Any] = ConfigTester(
            self, config_class=__a, has_text_modality=__a, hidden_size=3_7)

    def SCREAMING_SNAKE_CASE__(self) -> str:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        """Intentionally skipped: no inputs_embeds support."""
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def SCREAMING_SNAKE_CASE__(self) -> Optional[Any]:
        """Intentionally skipped: no input/output embedding accessors."""
        pass

    def SCREAMING_SNAKE_CASE__(self) -> Any:
        """Check that every model's `call` signature starts with `pixel_values`."""
        __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Optional[int] = model_class(__a)
            __snake_case : Union[str, Any] = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : Optional[int] = [*signature.parameters.keys()]
            __snake_case : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1], __a)

    def SCREAMING_SNAKE_CASE__(self) -> Optional[int]:
        """Check hidden-state outputs: count and trailing shape per layer."""
        def check_hidden_states_output(__a, __a, __a):
            __snake_case : str = model_class(__a)
            __snake_case : List[Any] = model(**self._prepare_for_class(__a, __a), training=__a)
            __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __snake_case : Optional[Any] = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(__a), __a)
            if hasattr(self.model_tester, 'encoder_seq_length'):
                __snake_case : List[Any] = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, 'chunk_length') and self.model_tester.chunk_length > 1:
                    __snake_case : str = seq_length * self.model_tester.chunk_length
            else:
                __snake_case : Optional[int] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
            if config.is_encoder_decoder:
                __snake_case : List[Any] = outputs.decoder_hidden_states
                # NOTE(review): `asseretIsInstance` is a typo for `assertIsInstance`
                # and would raise AttributeError if this branch ever ran.
                self.asseretIsInstance(__a, (list, tuple))
                self.assertEqual(len(__a), __a)
                __snake_case : List[str] = getattr(self.model_tester, 'seq_length', __a)
                __snake_case : Tuple = getattr(self.model_tester, 'decoder_seq_length', __a)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], )

        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : List[str] = True
            check_hidden_states_output(__a, __a, __a)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : str = True
            check_hidden_states_output(__a, __a, __a)

    def SCREAMING_SNAKE_CASE__(self, __a, __a, __a=False) -> int:
        """Drop `labels` for the teacher-distillation head, which accepts none."""
        __snake_case : Optional[int] = super()._prepare_for_class(__a, __a, return_labels=__a)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        """Forward-pass shape test for the base model."""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
    def SCREAMING_SNAKE_CASE__(self) -> Tuple:
        """Skipped: masked image modeling not implemented.

        NOTE(review): the tester defines no `create_and_check_for_masked_image_modeling`.
        """
        __snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a)

    def SCREAMING_SNAKE_CASE__(self) -> int:
        """Forward-pass shape test for the classification head."""
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a)

    @slow
    def SCREAMING_SNAKE_CASE__(self) -> List[str]:
        """Smoke-test loading the first published checkpoint from the Hub."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
            self.assertIsNotNone(__a)

    def SCREAMING_SNAKE_CASE__(self) -> Tuple:
        """Check attention outputs: count and trailing shape, via kwarg and config."""
        __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : Tuple = True
        __snake_case : Optional[Any] = getattr(self.model_tester, 'seq_length', __a)
        __snake_case : List[Any] = getattr(self.model_tester, 'encoder_seq_length', __a)
        __snake_case : Tuple = getattr(self.model_tester, 'key_length', __a)
        __snake_case : Optional[Any] = getattr(self.model_tester, 'chunk_length', __a)
        if chunk_length is not None and hasattr(self.model_tester, 'num_hashes'):
            __snake_case : str = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            __snake_case : Optional[Any] = True
            __snake_case : Dict = False
            __snake_case : Optional[int] = True
            __snake_case : Dict = model_class(__a)
            __snake_case : Tuple = model(**self._prepare_for_class(__a, __a), training=__a)
            __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__a), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __snake_case : Dict = True
            __snake_case : str = model_class(__a)
            __snake_case : str = model(**self._prepare_for_class(__a, __a), training=__a)
            __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__a), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )

    def SCREAMING_SNAKE_CASE__(self) -> int:
        """Build each model from maximally-flexible symbolic Keras inputs and run it."""
        __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __snake_case : Tuple = model_class(__a)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __snake_case : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=__a)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __snake_case : Tuple = model(__a)
            self.assertTrue(outputs_dict is not None)


def _SCREAMING_SNAKE_CASE() -> int:
    """Load the standard COCO cats fixture image used by the integration tests.

    NOTE(review): the result is bound to a throwaway local, then `image`
    (undefined) is returned -- another mangling casualty.
    """
    __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_tf
@require_vision
class a_(unittest.TestCase):
    # Slow integration tests against the published efficientformer-l1-300 checkpoint.
    @cached_property
    def SCREAMING_SNAKE_CASE__(self) -> int:
        """Image processor for the checkpoint, or None without vision deps."""
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
            if is_vision_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE__(self) -> Union[str, Any]:
        """End-to-end logits check for the plain classification head."""
        __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
        __snake_case : Optional[int] = self.default_image_processor
        __snake_case : List[Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=__a, return_tensors='tf')
        # forward pass
        __snake_case : List[str] = model(**__a, training=__a)
        # verify the logits
        __snake_case : str = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, __a)
        __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], __a, atol=1E-4))

    @slow
    def SCREAMING_SNAKE_CASE__(self) -> List[Any]:
        """End-to-end logits check for the distillation (teacher) head."""
        __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300')
        __snake_case : List[Any] = self.default_image_processor
        __snake_case : Union[str, Any] = prepare_img()
        __snake_case : List[Any] = image_processor(images=__a, return_tensors='tf')
        # forward pass
        __snake_case : Optional[int] = model(**__a, training=__a)
        # verify the logits
        __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, __a)
        __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], __a, atol=1E-4))
61
1
'''simple docstring''' from collections.abc import Callable def _SCREAMING_SNAKE_CASE ( A : Callable[[float], float] , A : float , A : float ) -> float: """simple docstring""" __snake_case : float = a __snake_case : float = b if function(A ) == 0: # one of the a or b is a root for the function return a elif function(A ) == 0: return b elif ( function(A ) * function(A ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: __snake_case : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(A ) == 0: return mid elif function(A ) * function(A ) < 0: __snake_case : Optional[int] = mid else: __snake_case : Any = mid __snake_case : Optional[Any] = start + (end - start) / 2.0 return mid def _SCREAMING_SNAKE_CASE ( A : float ) -> float: """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_0_0_0)) import doctest doctest.testmod()
61
'''simple docstring''' __A = {str(digit): digit**5 for digit in range(1_0)} def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
61
1
"""Package initializer for the Stable Diffusion pipelines.

Re-exports every pipeline variant, guarded by availability checks so that
missing optional dependencies (torch, transformers, k-diffusion, onnx, flax)
degrade to dummy objects instead of import errors.

NOTE(review): the two output classes below have been identifier-mangled
(`a_`, `UpperCamelCase_`, `_snake_case`): `UpperCamelCase_` is undefined
(presumably `BaseOutput`) and the duplicate `_snake_case = 42` fields were
presumably annotated output fields such as `images` and
`nsfw_content_detected` -- TODO confirm against upstream.
"""
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import (
    BaseOutput,
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_onnx_available,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


@dataclass
class a_(UpperCamelCase_):
    # Output container for the torch Stable Diffusion pipelines (mangled fields).
    _snake_case = 42
    _snake_case = 42


# Core torch + transformers pipelines; fall back to dummies when unavailable.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

# Pipelines that additionally require transformers >= 4.25.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

# Pipelines that additionally require transformers >= 4.26.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepthaImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPixaPixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline

# k-diffusion sampler support requires k-diffusion >= 0.0.12.
try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version('''>=''', '''0.0.12''')
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

# ONNX runtime exports.
try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

# Flax/JAX exports (no dummy fallback: plain `if` guard).
if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class a_(UpperCamelCase_):
        # Output container for the Flax pipelines (mangled fields, see module note).
        _snake_case = 42
        _snake_case = 42

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
61
"""Binary-tree traversals: pre/in/post-order, level order, per-level and
zig-zag orderings, plus tree height."""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """A binary-tree node holding an int payload and optional children."""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the fixed demo tree:  1 / (2 -> 4,5) \\ 3."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using an explicit FIFO queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the payloads at ``level`` (1-based), left child first."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the payloads at ``level`` (1-based), right child first."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Level lists with alternating direction: L-to-R, then R-to-L, ..."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0  # 0 => next level left-to-right, 1 => right-to-left
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the demo tree."""
    root = make_tree()
    print(f"""In-order Traversal: {inorder(root)}""")
    print(f"""Pre-order Traversal: {preorder(root)}""")
    print(f"""Post-order Traversal: {postorder(root)}""", '\n')
    print(f"""Height of Tree: {height(root)}""", '\n')
    print('Complete Level Order Traversal: ')
    print(level_order(root), '\n')
    print('Level-wise order Traversal: ')
    for level in range(1, height(root) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(root, level=level))
    print('\nZigZag order Traversal: ')
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
61
1
"""Simulated annealing over a 2-D search problem (see hill_climbing.SearchProblem)."""
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize ``search_prob`` by simulated annealing and return the best state seen.

    :param search_prob: initial SearchProblem state (must expose ``score()``,
        ``get_neighbors()``, and ``x``/``y`` coordinates)
    :param find_max: maximize the score when True, minimize when False
    :param max_x, min_x, max_y, min_y: rectangular bounds on accepted neighbors
    :param visualization: plot score vs. iteration with matplotlib at the end
    :param start_temperate: initial temperature
    :param rate_of_decrease: fractional temperature decay per outer iteration
    :param threshold_temp: stop once the temperature falls below this value
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # Metropolis acceptance: worse moves allowed with probability
                # e^(change / T), so exploration shrinks as T cools.
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_fa(x, y):
        """Convex bowl: x^2 + y^2."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    def test_fa(x, y):
        """Saddle-free test surface: 3x^2 - 6y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'''{local_min.score()}'''
    )
61
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... 
def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def 
_SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int = w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... 
def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out 
of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
61
1
'''simple docstring''' # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
61
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification __A = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co __A = '''main''' # Default branch name __A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2''' # One particular commit (not the top of `main`) __A = '''aaaaaaa''' # This commit does not exist, so we should 404. __A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684''' # Sha-1 of config.json on the top of `main`, for checking purposes __A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3''' @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" print('Bonjour!' ) yield print('Au revoir!' 
) class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers') is not None class a_ ( unittest.TestCase ): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" with ContextManagers([]): print('Transformers are awesome!') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" with ContextManagers([context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n') @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO) def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" with ContextManagers([context_fr(), context_en()]): print('Transformers are awesome!') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n') @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions']) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_tf def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.assertEqual(find_labels(__a) , ['labels']) self.assertEqual(find_labels(__a) , ['labels', 'next_sentence_label']) self.assertEqual(find_labels(__a) , ['start_positions', 'end_positions']) class a_ ( 
UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , ['labels']) @require_flax def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) class a_ ( UpperCamelCase_ ): pass self.assertEqual(find_labels(__a) , [])
61
1
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version __A = get_logger(__name__) class a_ : _snake_case = """dummy_data""" _snake_case = """datasets""" _snake_case = False def __init__(self , __a , __a , __a , __a = None , __a = False , __a = True , __a = None , ) -> str: """simple docstring""" __snake_case : Union[str, Any] = 0 __snake_case : str = dataset_name __snake_case : str = cache_dir __snake_case : Tuple = use_local_dummy_data __snake_case : int = config # download_callbacks take a single url as input __snake_case : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __snake_case : List[str] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __snake_case : Optional[int] = str(__a) # to be downloaded __snake_case : List[str] = None __snake_case : List[str] = None @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" if self._dummy_file is None: __snake_case : Tuple = self.download_dummy_data() return self._dummy_file @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name) # structure is dummy / version_name return os.path.join('dummy' , self.version_name) @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return os.path.join(self.dummy_data_folder , 'dummy_data.zip') def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else 
self.github_path_to_dummy_data ) __snake_case : int = cached_path( __a , cache_dir=self.cache_dir , extract_compressed_file=__a , force_extract=__a) return os.path.join(__a , self.dummy_file_name) @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file) @property def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" if self._bucket_url is None: __snake_case : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/')) return self._bucket_url @property def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1]) def SCREAMING_SNAKE_CASE__ (self , __a , *__a) -> Optional[Any]: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested __snake_case : int = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __snake_case : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__a , __a): return self.create_dummy_data_dict(__a , __a) elif isinstance(__a , (list, tuple)): return self.create_dummy_data_list(__a , __a) else: return self.create_dummy_data_single(__a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , *__a) -> Optional[int]: """simple docstring""" return self.download_and_extract(__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[Any]: """simple docstring""" return self.download_and_extract(__a) def SCREAMING_SNAKE_CASE__ (self , __a , *__a , **__a) -> Union[str, Any]: """simple docstring""" return path def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return {} def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = {} 
for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__a , __a): for single_url in single_urls: download_callback(__a) else: __snake_case : Optional[Any] = single_urls download_callback(__a) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__a , __a): __snake_case : Dict = [os.path.join(__a , urllib.parse.quote_plus(Path(__a).name)) for x in single_urls] else: __snake_case : Optional[Any] = single_urls __snake_case : List[str] = os.path.join(__a , urllib.parse.quote_plus(Path(__a).name)) __snake_case : Optional[Any] = value # make sure that values are unique if all(isinstance(__a , __a) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values()): # append key to value to make its name unique __snake_case : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> List[Any]: """simple docstring""" __snake_case : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __snake_case : int = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , __a)) for url in data_url) __snake_case : Any = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url) if data_url and (is_tf_records or is_pubmed_records): __snake_case : Optional[Any] = [data_url[0]] * len(__a) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__a) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __snake_case : List[Any] = os.path.join(__a , urllib.parse.quote_plus(single_url.split('/')[-1])) dummy_data_list.append(__a) return dummy_data_list def 
SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(__a) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __snake_case : Tuple = os.path.join(__a , urllib.parse.quote_plus(data_url.split('/')[-1])) if os.path.exists(__a) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self , __a) -> List[str]: """simple docstring""" def _iter_archive_members(__a): # this preserves the order of the members inside the ZIP archive __snake_case : Optional[Any] = Path(self.dummy_file).parent __snake_case : int = path.relative_to(__a) with ZipFile(self.local_path_to_dummy_data) as zip_file: __snake_case : List[Any] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(__a) __snake_case : Tuple = Path(__a) __snake_case : int = _iter_archive_members(__a) if self.use_local_dummy_data else path.rglob('*') for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__')): yield file_path.relative_to(__a).as_posix(), file_path.open('rb') def SCREAMING_SNAKE_CASE__ (self , __a) -> Optional[int]: """simple docstring""" if not isinstance(__a , __a): __snake_case : Union[str, Any] = [paths] for path in paths: if os.path.isfile(__a): if os.path.basename(__a).startswith(('.', '__')): return 
yield path else: for dirpath, dirnames, filenames in os.walk(__a): if os.path.basename(__a).startswith(('.', '__')): continue dirnames.sort() for filename in sorted(__a): if filename.startswith(('.', '__')): continue yield os.path.join(__a , __a)
61
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
1
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class a_ ( UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : int = tempfile.mkdtemp() __snake_case : List[Any] = 8 # DPR tok __snake_case : Union[str, Any] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : List[Any] = os.path.join(self.tmpdirname , 'dpr_tokenizer') os.makedirs(__a , exist_ok=__a) __snake_case : Dict = os.path.join(__a , DPR_VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) # BART tok __snake_case : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __snake_case : int = dict(zip(__a , range(len(__a)))) __snake_case : Optional[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] 
__snake_case : List[str] = {'unk_token': '<unk>'} __snake_case : Optional[Any] = os.path.join(self.tmpdirname , 'bart_tokenizer') os.makedirs(__a , exist_ok=__a) __snake_case : Union[str, Any] = os.path.join(__a , BART_VOCAB_FILES_NAMES['vocab_file']) __snake_case : Optional[Any] = os.path.join(__a , BART_VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(__a) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self) -> DPRQuestionEncoderTokenizer: """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer')) def SCREAMING_SNAKE_CASE__ (self) -> BartTokenizer: """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer')) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) @require_tokenizers def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : Any = os.path.join(self.tmpdirname , 'rag_tokenizer') __snake_case : Optional[int] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict()) __snake_case : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer()) rag_config.save_pretrained(__a) rag_tokenizer.save_pretrained(__a) __snake_case : Any = RagTokenizer.from_pretrained(__a , config=__a) self.assertIsInstance(new_rag_tokenizer.question_encoder , __a) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab()) self.assertIsInstance(new_rag_tokenizer.generator , __a) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab()) @slow def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : List[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq') __snake_case : 
Union[str, Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __snake_case : Tuple = tokenizer(__a) self.assertIsNotNone(__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : Dict = RagTokenizer.from_pretrained('facebook/rag-sequence-nq') __snake_case : int = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __snake_case : Union[str, Any] = tokenizer(__a) self.assertIsNotNone(__a)
61
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : str = 1 for i in range(1 , num + 1 ): fact *= i return fact def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 0 while number > 0: __snake_case : Dict = number % 10 sum_of_digits += last_digit __snake_case : Union[str, Any] = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int: """simple docstring""" __snake_case : List[Any] = factorial(A ) __snake_case : Dict = split_and_add(A ) return result if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
61
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , ) -> Tuple: """simple docstring""" __snake_case : str = size if size is not None else {'height': 1_8, 'width': 1_8} __snake_case : Optional[int] = parent __snake_case : List[Any] = batch_size __snake_case : str = num_channels __snake_case : Any = image_size __snake_case : Union[str, Any] = min_resolution __snake_case : Optional[Any] = max_resolution __snake_case : Optional[Any] = do_resize __snake_case : Optional[int] = size __snake_case : str = apply_ocr def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = LayoutLMvaImageProcessor if is_pytesseract_available() else None def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : Union[str, Any] = LayoutLMvaImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'apply_ocr')) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple 
docstring""" __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8}) __snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2}) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : List[str] = image_processing(image_inputs[0] , return_tensors='pt') self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __a) self.assertIsInstance(encoding.boxes , __a) # Test batched __snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __snake_case : Any = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset __snake_case : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test') __snake_case : List[str] = Image.open(ds[0]['file']).convert('RGB') __snake_case : str = image_processing(__a , return_tensors='pt') self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with 
Tesseract 4.1.1 __snake_case : Dict = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 __snake_case : List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 
3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 
4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], 
[4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a) self.assertListEqual(encoding.boxes , __a) # with apply_OCR = False __snake_case : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=__a) __snake_case : int = image_processing(__a , return_tensors='pt') self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
61
"""Tests for the ChineseCLIP image processor.

Reconstructed from mangled source: duplicate ``__a`` parameters (a SyntaxError),
discarded ``self.`` attribute assignments, and the tester class name (referenced
below as ``ChineseCLIPImageProcessingTester``) have been restored from the reads
inside the module.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and random image inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # Resolve the default resize/crop geometry when not supplied.
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images (default), numpy arrays, or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # Each image gets its own random resolution in [min_resolution, max_resolution).
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Passing a plain int for `size` switches to shortest-edge resizing.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Same checks with 4-channel inputs; conversion to RGB yields 3 output channels."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
61
1
"""Tests for the `datasets` CSV packaged module.

Reconstructed from mangled source: pytest fixtures are matched by parameter
name, so the destroyed fixture/function names and the undefined-`x` converter
lambda have been restored from the reads inside each test.
"""
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    """Well-formed two-column CSV."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    """CSV whose last row has a trailing extra field, which makes the parser fail."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """Single-column CSV whose one value is a path to an image file."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    """Single-column CSV of string class labels."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Single-column CSV where each cell is a space-separated list of ints."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed file aborts generation and logs which file failed to parse."""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    """A column declared as `Image` is cast to the Image storage type."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Calling a feature returns its underlying pyarrow type.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    """A column declared as `ClassLabel` is cast to integer label ids."""
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    """A custom converter turns each cell into a list of ints."""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
61
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class a_ ( UpperCamelCase_ ): _snake_case = """vit_msn""" def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-06 , __a=2_2_4 , __a=1_6 , __a=3 , __a=True , **__a , ) -> Any: """simple docstring""" super().__init__(**__a) __snake_case : List[str] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : str = intermediate_size __snake_case : List[str] = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[str] = initializer_range __snake_case : Optional[int] = layer_norm_eps __snake_case : Dict = image_size __snake_case : int = patch_size __snake_case : Dict = num_channels __snake_case : Tuple = qkv_bias
61
1
# Scheduler re-exports for the package, gated on optional backends.
# Each try/except probes one backend via the `is_*_available` helpers and,
# when it is missing, star-imports the matching dummy objects so attribute
# access still works (raising a helpful error only on use).
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


# PyTorch schedulers (the default backend).
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

# Flax (JAX) schedulers.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

# Schedulers that additionally need SciPy.
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

# Schedulers that additionally need torchsde.
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
61
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) __snake_case : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) ) return round(A , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
61
1
"""Tests for accelerate's KwargsHandler dataclasses.

Reconstructed from mangled source: the dataclass is referenced as ``MockClass``
but was renamed, and the ``__main__`` section assigned every value to ``__A``
while reading ``ddp_scaler``/``model``/``error_msg`` — names restored from the
reads.
"""
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` only returns the fields that differ from their defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this very file under torchrun; the __main__ block below
        # performs the actual DDP kwargs checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
61
"""Lazy-import structure for the DistilBERT model package.

Reconstructed from mangled source: the optional-backend blocks each assigned a
fresh ``__A`` list (clobbering the previous one) and the final ``_LazyModule``
read ``_import_structure``, which was never bound — the structure is now built
up incrementally and installed via ``sys.modules`` as in the standard pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base structure: always-importable configuration and slow tokenizer.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static imports for type checkers; mirrors _import_structure exactly.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
61
1
"""Processor for InstructBLIP: wraps a BLIP image processor plus two tokenizers.

Reconstructed from mangled source: the duplicated ``__a`` parameters and the
discarded Q-Former encodings (popped but never stored) have been restored —
they are placed in the output as ``qformer_input_ids`` / ``qformer_attention_mask``,
which is what separates this processor from the plain BLIP one.
"""
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # The Q-Former tokenizer is handled manually (saved/loaded in a
        # subfolder) because ProcessorMixin only manages `attributes`.
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize `text` with both tokenizers and preprocess `images`.

        Returns a BatchFeature with the language-model tokenizer's fields,
        `qformer_input_ids`/`qformer_attention_mask` from the Q-Former
        tokenizer, and `pixel_values` from the image processor.
        """
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # Prefix the Q-Former fields so they don't clash with the LM fields.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the language-model tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the language-model tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save the processor, placing the Q-Former tokenizer in a subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor, restoring the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
61
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __snake_case : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A , id=A )
61
1
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __A = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class a_ ( UpperCamelCase_ ): def __init__(self , __a = 1_0_1) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = length def __len__(self) -> List[str]: """simple docstring""" return self.length def __getitem__(self , __a) -> int: """simple docstring""" return i class a_ : def __call__(self , __a) -> List[str]: """simple docstring""" return {"input_ids": torch.tensor(__a), "labels": torch.tensor(__a)} class a_ ( nn.Module ): def __init__(self) -> Union[str, Any]: """simple docstring""" super().__init__() # Add some (unused) params otherwise DDP will complain. 
__snake_case : List[str] = nn.Linear(1_2_0 , 8_0) def SCREAMING_SNAKE_CASE__ (self , __a , __a=None) -> str: """simple docstring""" if labels is not None: return torch.tensor(0.0 , device=input_ids.device), input_ids else: return input_ids class a_ ( UpperCamelCase_ ): @require_torch_neuroncore def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = F"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Any = F"""--output_dir {output_dir}""".split() __snake_case : Union[str, Any] = ['torchrun'] + distributed_args + args execute_subprocess_async(__a , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class a_ ( UpperCamelCase_ ): @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Dict = F"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Optional[Any] = F"""--output_dir {output_dir}""".split() __snake_case : str = ['torchrun'] + distributed_args + args execute_subprocess_async(__a , env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __A = HfArgumentParser((TrainingArguments,)) __A = parser.parse_args_into_dataclasses()[0] logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' f'''distributed training: 
{training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [1_0_1, 4_0, 7]: __A = DummyDataset(dataset_length) def _SCREAMING_SNAKE_CASE ( A : EvalPrediction ) -> Dict: """simple docstring""" __snake_case : str = list(range(len(A ) ) ) __snake_case : Dict = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( 'Predictions and/or labels do not match expected results:\n - predictions: ' F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} __A = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __A = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __A = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __A = 2 __A = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __A = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __A = None
61
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A = logging.get_logger(__name__) if is_vision_available(): import PIL class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = None , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , __a = True , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : int = size if size is not None else {'shortest_edge': 2_2_4} __snake_case : str = get_size_dict(__a , default_to_square=__a) __snake_case : Tuple = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} __snake_case : str = get_size_dict(__a , default_to_square=__a , param_name='crop_size') __snake_case : Any = do_resize __snake_case : Optional[int] = size __snake_case : int = resample __snake_case : List[Any] = do_center_crop __snake_case : Tuple = crop_size __snake_case : Union[str, Any] = do_rescale __snake_case : int = rescale_factor __snake_case : Tuple = do_normalize __snake_case : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __snake_case : Any = image_std if image_std is not None else OPENAI_CLIP_STD __snake_case : int = do_convert_rgb def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : List[str] = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in 
size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""") __snake_case : str = get_resize_output_image_size(__a , size=size['shortest_edge'] , default_to_square=__a) return resize(__a , size=__a , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Union[str, Any] = get_size_dict(__a) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""") return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> int: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize __snake_case : List[Any] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , param_name='size' , default_to_square=__a) __snake_case : Optional[int] = resample if resample is not None else self.resample __snake_case : Any = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : str = crop_size if crop_size is not None else self.crop_size __snake_case : Optional[int] = get_size_dict(__a , param_name='crop_size' , default_to_square=__a) __snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __snake_case 
: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Tuple = image_mean if image_mean is not None else self.image_mean __snake_case : Union[str, Any] = image_std if image_std is not None else self.image_std __snake_case : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __snake_case : Any = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # PIL RGBA images are converted to RGB if do_convert_rgb: __snake_case : str = [convert_to_rgb(__a) for image in images] # All transformations expect numpy arrays. __snake_case : List[Any] = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Union[str, Any] = [self.resize(image=__a , size=__a , resample=__a) for image in images] if do_center_crop: __snake_case : Union[str, Any] = [self.center_crop(image=__a , size=__a) for image in images] if do_rescale: __snake_case : Any = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Optional[int] = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : int = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Tuple = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> int: """simple docstring""" if not is_accelerate_available(): return method __snake_case : Optional[Any] = version.parse(accelerate.__version__ ).base_version if version.parse(A ) < version.parse('0.17.0' ): return method def wrapper(self : Optional[Any] , *A : Optional[Any] , **A : Optional[int] ): if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ): self._hf_hook.pre_forward(self ) return method(self , *A , **A ) return wrapper
61
1
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class a_ : _snake_case = 42 _snake_case = None _snake_case = None def _SCREAMING_SNAKE_CASE ( ) -> Node | None: """simple docstring""" __snake_case : str = Node(1 ) __snake_case : Tuple = Node(2 ) __snake_case : Optional[int] = Node(3 ) __snake_case : List[str] = Node(4 ) __snake_case : List[str] = Node(5 ) return tree def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] if root is None: return output __snake_case : Optional[int] = deque([root] ) while process_queue: __snake_case : List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | 
None , A : int ) -> Sequence[Node | None]: """simple docstring""" __snake_case : list[Any] = [] def populate_output(A : Node | None , A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(A , A ) return output def _SCREAMING_SNAKE_CASE ( A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] __snake_case : list[Sequence[Node | None]] = [] __snake_case : List[Any] = 0 __snake_case : int = height(A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(A , A ) ) __snake_case : int = 1 else: output.append(get_nodes_from_right_to_left(A , A ) ) __snake_case : Tuple = 0 return output def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing. """simple docstring""" __snake_case : Optional[int] = make_tree() print(F"""In-order Traversal: {inorder(A )}""" ) print(F"""Pre-order Traversal: {preorder(A )}""" ) print(F"""Post-order Traversal: {postorder(A )}""" , '\n' ) print(F"""Height of Tree: {height(A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(A , level=A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
61
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
61
1
'''simple docstring''' import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _SCREAMING_SNAKE_CASE ( A : str , A : str , **A : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = AutoConfig.from_pretrained(A , **A ) __snake_case : Any = AutoModelForSeqaSeqLM.from_config(A ) model.save_pretrained(A ) AutoTokenizer.from_pretrained(A ).save_pretrained(A ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
61
'''simple docstring''' import math class a_ : def __init__(self , __a=0) -> Any: # a graph with Node 0,1,...,N-1 """simple docstring""" __snake_case : List[str] = n __snake_case : Tuple = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # adjacency matrix for weight __snake_case : Union[str, Any] = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = w def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
61
1
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a = True , __a = None , __a = 3_2 , __a = True , __a = 1 / 2_5_5 , __a = True , __a = True , __a = [0.48_145_466, 0.4_578_275, 0.40_821_073] , __a = [0.26_862_954, 0.26_130_258, 0.27_577_711] , __a = True , __a=7 , __a=3_0 , __a=4_0_0 , __a=3 , ) -> Dict: """simple docstring""" __snake_case : List[str] = parent __snake_case : List[Any] = do_resize __snake_case : List[Any] = size if size is not None else {'shortest_edge': 2_8_8} __snake_case : Any = size_divisor __snake_case : List[str] = do_rescale __snake_case : List[str] = rescale_factor __snake_case : List[Any] = do_normalize __snake_case : List[str] = do_center_crop __snake_case : str = image_mean __snake_case : Optional[Any] = image_std __snake_case : int = do_pad __snake_case : List[str] = batch_size __snake_case : Dict = num_channels __snake_case : Dict = min_resolution __snake_case : List[str] = max_resolution def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def SCREAMING_SNAKE_CASE__ (self , __a , __a=False) -> Tuple: """simple docstring""" if not batched: __snake_case : int = self.size['shortest_edge'] __snake_case : Dict = image_inputs[0] if isinstance(__a , Image.Image): __snake_case ,__snake_case : List[Any] = image.size else: __snake_case ,__snake_case : Optional[Any] = 
image.shape[1], image.shape[2] __snake_case : List[Any] = size / min(__a , __a) if h < w: __snake_case ,__snake_case : Tuple = size, scale * w else: __snake_case ,__snake_case : List[Any] = scale * h, size __snake_case : Dict = int((1_3_3_3 / 8_0_0) * size) if max(__a , __a) > max_size: __snake_case : Union[str, Any] = max_size / max(__a , __a) __snake_case : List[str] = newh * scale __snake_case : List[Any] = neww * scale __snake_case ,__snake_case : List[str] = int(newh + 0.5), int(neww + 0.5) __snake_case ,__snake_case : List[str] = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __snake_case : List[Any] = [] for image in image_inputs: __snake_case ,__snake_case : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __snake_case : Tuple = max(__a , key=lambda __a: item[0])[0] __snake_case : List[str] = max(__a , key=lambda __a: item[1])[1] return expected_height, expected_width @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = BridgeTowerImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Any = BridgeTowerImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'size_divisor')) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : 
Optional[int] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __snake_case ,__snake_case : Optional[int] = self.image_processor_tester.get_expected_values(__a) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case : str = image_processing(__a , return_tensors='pt').pixel_values __snake_case ,__snake_case : List[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __snake_case ,__snake_case : List[str] = self.image_processor_tester.get_expected_values(__a) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values __snake_case ,__snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(__a , batched=__a) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : Dict = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __snake_case ,__snake_case : Dict = self.image_processor_tester.get_expected_values(__a) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values __snake_case ,__snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(__a , batched=__a) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
61
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : 
List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : List[Any] = 'ZinengTang/tvlt-base' __snake_case : Optional[Any] = tempfile.mkdtemp() def SCREAMING_SNAKE_CASE__ (self , **__a) -> Optional[Any]: """simple docstring""" return TvltImageProcessor.from_pretrained(self.checkpoint , **__a) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Tuple: """simple docstring""" return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__a) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[int] = self.get_image_processor() __snake_case : Optional[int] = self.get_feature_extractor() __snake_case : Optional[int] = TvltProcessor(image_processor=__a , feature_extractor=__a) processor.save_pretrained(self.tmpdirname) __snake_case : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname) self.assertIsInstance(processor.feature_extractor , __a) self.assertIsInstance(processor.image_processor , __a) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.get_image_processor() __snake_case : Union[str, Any] = self.get_feature_extractor() __snake_case : Optional[Any] = TvltProcessor(image_processor=__a , feature_extractor=__a) __snake_case : Dict = np.ones([1_2_0_0_0]) __snake_case : Tuple = feature_extractor(__a , return_tensors='np') __snake_case : int = processor(audio=__a , return_tensors='np') for key in 
audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = self.get_image_processor() __snake_case : Union[str, Any] = self.get_feature_extractor() __snake_case : Optional[Any] = TvltProcessor(image_processor=__a , feature_extractor=__a) __snake_case : List[str] = np.ones([3, 2_2_4, 2_2_4]) __snake_case : Union[str, Any] = image_processor(__a , return_tensors='np') __snake_case : Optional[Any] = processor(images=__a , return_tensors='np') for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.get_image_processor() __snake_case : Optional[Any] = self.get_feature_extractor() __snake_case : Dict = TvltProcessor(image_processor=__a , feature_extractor=__a) __snake_case : Tuple = np.ones([1_2_0_0_0]) __snake_case : Union[str, Any] = np.ones([3, 2_2_4, 2_2_4]) __snake_case : List[Any] = processor(audio=__a , images=__a) self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask']) # test if it raises when no input is passed with pytest.raises(__a): processor() def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : int = self.get_image_processor() __snake_case : Union[str, Any] = self.get_feature_extractor() __snake_case : List[Any] = TvltProcessor(image_processor=__a , feature_extractor=__a) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
61
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
1
'''simple docstring''' import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = PhobertTokenizer _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : Optional[int] = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@'] __snake_case : Optional[int] = dict(zip(__a , range(len(__a)))) __snake_case : Any = ['#version: 0.2', 'l à</w>'] __snake_case : Tuple = {'unk_token': '<unk>'} __snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) __snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""") with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(__a)) def SCREAMING_SNAKE_CASE__ (self , **__a) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map) return PhobertTokenizer.from_pretrained(self.tmpdirname , **__a) def SCREAMING_SNAKE_CASE__ (self , __a) -> Any: """simple docstring""" __snake_case : List[str] = 'Tôi là VinAI Research' __snake_case : Optional[int] = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>' return input_text, output_text def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : str = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) __snake_case : int = 'Tôi là VinAI Research' __snake_case : Optional[Any] = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split() __snake_case : str = tokenizer.tokenize(__a) print(__a) self.assertListEqual(__a , __a) __snake_case : List[Any] = tokens + 
[tokenizer.unk_token] __snake_case : List[str] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , __a)
61
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() 
torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
1
'''simple docstring''' import re from filelock import FileLock try: import nltk __A = True except (ImportError, ModuleNotFoundError): __A = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def _SCREAMING_SNAKE_CASE ( A : str ) -> str: """simple docstring""" re.sub('<n>' , '' , A ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(A ) )
61
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": 
accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) 
# Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( A : Optional[Any] , A : Optional[Any]=False ) -> Optional[Any]: """simple docstring""" __snake_case : str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 
'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __snake_case : int = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _SCREAMING_SNAKE_CASE ( A : str , A : Dict , A : Optional[Any]=False ) -> int: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: __snake_case : Optional[int] = '' else: __snake_case : List[str] = 'vit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __snake_case : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) __snake_case : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __snake_case : Any = in_proj_weight[ : config.hidden_size, : ] __snake_case : Tuple = in_proj_bias[: config.hidden_size] __snake_case : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __snake_case : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __snake_case : List[Any] = in_proj_weight[ -config.hidden_size :, : ] __snake_case : Optional[int] = in_proj_bias[-config.hidden_size :] def _SCREAMING_SNAKE_CASE ( A : str ) -> Tuple: """simple docstring""" __snake_case : Optional[int] = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE ( A : Tuple , A : str , A : Optional[Any] ) -> List[Any]: """simple docstring""" __snake_case : List[str] = dct.pop(A ) __snake_case : List[str] = val def _SCREAMING_SNAKE_CASE ( ) -> 
Union[str, Any]: """simple docstring""" __snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' __snake_case : Optional[Any] = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Union[str, Any] , A : Dict=True ) -> int: """simple docstring""" __snake_case : Optional[int] = ViTConfig() # patch_size if model_name[-1] == "8": __snake_case : Optional[int] = 8 # set labels if required if not base_model: __snake_case : Any = 10_00 __snake_case : Tuple = 'huggingface/label-files' __snake_case : str = 'imagenet-1k-id2label.json' __snake_case : int = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : Tuple = idalabel __snake_case : Optional[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __snake_case : str = 3_84 __snake_case : Dict = 15_36 __snake_case : Tuple = 12 __snake_case : Dict = 6 # load original model from torch hub __snake_case : List[str] = torch.hub.load('facebookresearch/dino:main' , A ) original_model.eval() # load state_dict of original model, remove and rename some keys __snake_case : List[Any] = original_model.state_dict() if base_model: remove_classification_head_(A ) __snake_case : Any = create_rename_keys(A , base_model=A ) for src, dest in rename_keys: rename_key(A , A , A ) read_in_q_k_v(A , A , A ) # load HuggingFace model if base_model: __snake_case : Union[str, Any] = ViTModel(A , add_pooling_layer=A ).eval() else: __snake_case : Optional[Any] = ViTForImageClassification(A ).eval() model.load_state_dict(A ) # Check outputs on an image, prepared by ViTImageProcessor __snake_case : Tuple = ViTImageProcessor() __snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors='pt' ) __snake_case : List[Any] = encoding['pixel_values'] __snake_case : Optional[Any] = model(A ) if 
base_model: __snake_case : List[Any] = original_model(A ) assert torch.allclose(A , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: __snake_case : str = original_model(A ) assert logits.shape == outputs.logits.shape assert torch.allclose(A , outputs.logits , atol=1e-3 ) Path(A ).mkdir(exist_ok=A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''dino_vitb16''', type=str, help='''Name of the model trained with DINO you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether to only convert the base model (no projection head weights).''', ) parser.set_defaults(base_model=True) __A = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
61
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
1
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class a_ ( unittest.TestCase ): def __init__(self , __a , __a=1_3 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=9_9 , __a=3_2 , __a=5 , __a=4 , __a=3_7 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_1_2 , __a=1_6 , __a=2 , __a=0.02 , __a=4 , ) -> Dict: """simple docstring""" __snake_case : List[Any] = parent __snake_case : Union[str, Any] = batch_size __snake_case : int = seq_length __snake_case : Optional[int] = is_training __snake_case : Dict = use_attention_mask __snake_case : List[str] = use_token_type_ids __snake_case : Tuple = use_labels __snake_case : List[str] = vocab_size __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : str = intermediate_size __snake_case : List[Any] = hidden_act __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : List[str] = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : Union[str, Any] = type_vocab_size __snake_case : Any = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Tuple = num_choices def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : List[str] = ids_tensor([self.batch_size, 
self.seq_length] , self.vocab_size) __snake_case : List[Any] = None if self.use_attention_mask: __snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length]) __snake_case : Optional[Any] = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __snake_case : List[str] = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : List[str] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case ,__snake_case : Tuple = config_and_inputs __snake_case : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case ,__snake_case : int = config_and_inputs __snake_case : int = True __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with 
ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = True _snake_case = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : str = FlaxRobertaPreLayerNormModelTester(self) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case : str = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__a) __snake_case : List[Any] = model(np.ones((1, 1))) self.assertIsNotNone(__a) @require_flax class a_ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : List[Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__a) __snake_case : Optional[int] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) __snake_case : List[str] = model(__a)[0] __snake_case : Dict = [1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape) , __a) # compare the actual values for a slice. 
__snake_case : Optional[Any] = np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__a) __snake_case : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa) __snake_case : List[str] = model(__a)[0] # compare the actual values for a slice. __snake_case : Tuple = np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=1E-4))
61
"""Convert timm LeviT checkpoints to the Hugging Face LeviT format.

Fixes applied: the two conversion functions were defined under a mangled
placeholder name while being called as ``convert_weight_and_push`` /
``convert_weights_and_push`` (NameError); the id2label comprehension keyed on
the wrong variable; module-level names were all bound to the same identifier.
"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    """Copy the weights of one pretrained timm LeviT model into the matching
    HF model, verify the logits agree, and optionally save/push the result.

    hidden_sizes   -- first-stage hidden size, used to select the timm variant
    name           -- checkpoint name, e.g. "levit-128S"
    config         -- LevitConfig for the target HF model
    save_directory -- root directory checkpoints are written under
    push_to_hub    -- when True, save model + image processor locally
    """
    print(f"""Converting {name}...""")
    with torch.no_grad():
        # Pick the timm source model that matches this configuration.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()

        # The two state dicts have the same ordering, so weights are copied
        # positionally from the timm keys onto the HF keys.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must produce identical logits on random input.
        x = torch.randn((2, 3, 224, 224))
        original_outputs = from_model(x)
        our_outputs = our_model(x).logits
        assert torch.allclose(original_outputs, our_outputs), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeviT checkpoint, or all of them when *model_name* is None.

    Returns (config, expected_shape) of the last converted model.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'

    # ImageNet-1k label mapping shared by every configuration below.
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )

    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : Any , A : int , A : List[str] , A : List[str] , A : Tuple , A : List[Any] ) -> Tuple: """simple docstring""" if index == r: for j in range(A ): print(data[j] , end=' ' ) print(' ' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location __snake_case : str = arr[i] combination_util(A , A , A , index + 1 , A , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(A , A , A , A , A , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Any , A : str ) -> Optional[int]: """simple docstring""" # A temporary array to store all combination one by one __snake_case : Dict = [0] * r # Print all combination using temporary array 'data[]' combination_util(A , A , A , 0 , A , 0 ) if __name__ == "__main__": # Driver code to check the function above __A = [1_0, 2_0, 3_0, 4_0, 5_0] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
61
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride 
__snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = 
TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and 
output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, 
self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 
'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , 
dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) 
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
1
"""SentencePiece tokenizer for RemBERT.

Fixes applied: the class inherited from an undefined mangled name (the import
provides ``PreTrainedTokenizer``); the three module-level constants were all
bound to the same identifier (each clobbering the previous); every method was
defined under the same mangled name, so only the last definition survived on
the class. Canonical names are restored; behavior of each method body is
unchanged.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    """RemBERT tokenizer backed by a SentencePiece model.

    Builds model inputs of the form ``[CLS] A [SEP]`` or
    ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor wraps a C++ object that cannot be
        # pickled; drop it here and reload it from vocab_file on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Tokenize *text* into SentencePiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Convert a piece (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a piece (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of pieces back into a single string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Add [CLS]/[SEP] around one sequence or a sequence pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a_a is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Return token type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the SentencePiece model file into *save_directory*.

        Logs an error and returns None when the directory does not exist
        (matching the original best-effort behavior).
        """
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
61
'''simple docstring''' __A = {str(digit): digit**5 for digit in range(1_0)} def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A ) ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(A ) ) if __name__ == "__main__": print(solution())
61
1
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> Tuple: """simple docstring""" __snake_case : Any = VideoMAEConfig() set_architecture_configs(A , A ) if "finetuned" not in model_name: __snake_case : List[Any] = False if "finetuned" in model_name: __snake_case : Any = 'huggingface/label-files' if "kinetics" in model_name: __snake_case : List[Any] = 4_00 __snake_case : Union[str, Any] = 'kinetics400-id2label.json' elif "ssv2" in model_name: __snake_case : Any = 1_74 __snake_case : Union[str, Any] = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) __snake_case : Dict = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : List[str] = {int(A ): v for k, v in idalabel.items()} __snake_case : Optional[int] = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : Optional[Any] ) -> Optional[int]: """simple docstring""" if "small" in model_name: __snake_case : Optional[int] = 3_84 __snake_case : Optional[int] = 15_36 __snake_case : List[Any] = 12 __snake_case : Dict = 16 __snake_case : List[Any] = 12 __snake_case : int = 3 __snake_case : Union[str, Any] = 1_92 __snake_case : Union[str, Any] = 7_68 elif "large" in model_name: __snake_case : Optional[Any] = 10_24 __snake_case : Union[str, Any] = 40_96 __snake_case : str = 24 __snake_case : Union[str, Any] = 16 __snake_case : List[Any] = 12 __snake_case : Dict = 8 __snake_case : Dict = 5_12 __snake_case : Dict = 20_48 elif "huge" in model_name: __snake_case : int = 12_80 __snake_case : List[Any] = 51_20 __snake_case : Any = 32 __snake_case : Dict = 16 
__snake_case : Optional[int] = 12 __snake_case : int = 8 __snake_case : List[str] = 6_40 __snake_case : List[Any] = 25_60 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def _SCREAMING_SNAKE_CASE ( A : List[str] ) -> List[str]: """simple docstring""" if "encoder." in name: __snake_case : List[str] = name.replace('encoder.' , '' ) if "cls_token" in name: __snake_case : str = name.replace('cls_token' , 'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: __snake_case : Optional[int] = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: __snake_case : Optional[Any] = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: __snake_case : List[Any] = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __snake_case : List[Any] = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' ) if "decoder.blocks" in name: __snake_case : Any = name.replace('decoder.blocks' , 'decoder.decoder_layers' ) if "blocks" in name: __snake_case : Dict = name.replace('blocks' , 'videomae.encoder.layer' ) if "attn.proj" in name: __snake_case : Any = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name and "bias" not in name: __snake_case : Dict = name.replace('attn' , 'attention.self' ) if "attn" in name: __snake_case : Optional[Any] = name.replace('attn' , 'attention.attention' ) if "norm1" in name: __snake_case : List[str] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __snake_case : Dict = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __snake_case : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __snake_case : List[Any] = name.replace('mlp.fc2' , 'output.dense' ) if "decoder_embed" in name: __snake_case : Union[str, Any] = 
name.replace('decoder_embed' , 'decoder.decoder_embed' ) if "decoder_norm" in name: __snake_case : Optional[Any] = name.replace('decoder_norm' , 'decoder.decoder_norm' ) if "decoder_pred" in name: __snake_case : str = name.replace('decoder_pred' , 'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: __snake_case : Dict = name.replace('norm.weight' , 'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: __snake_case : Dict = name.replace('norm.bias' , 'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: __snake_case : List[str] = name.replace('head' , 'classifier' ) return name def _SCREAMING_SNAKE_CASE ( A : List[str] , A : Optional[int] ) -> Optional[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): __snake_case : Dict = orig_state_dict.pop(A ) if key.startswith('encoder.' ): __snake_case : Optional[Any] = key.replace('encoder.' , '' ) if "qkv" in key: __snake_case : int = key.split('.' ) if key.startswith('decoder.blocks' ): __snake_case : Any = config.decoder_hidden_size __snake_case : Dict = int(key_split[2] ) __snake_case : int = 'decoder.decoder_layers.' if "weight" in key: __snake_case : Any = val[:dim, :] __snake_case : Dict = val[dim : dim * 2, :] __snake_case : List[Any] = val[-dim:, :] else: __snake_case : List[Any] = config.hidden_size __snake_case : List[str] = int(key_split[1] ) __snake_case : List[str] = 'videomae.encoder.layer.' 
if "weight" in key: __snake_case : int = val[:dim, :] __snake_case : Union[str, Any] = val[dim : dim * 2, :] __snake_case : Tuple = val[-dim:, :] else: __snake_case : Any = val return orig_state_dict def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) __snake_case : Dict = np.load(A ) return list(A ) def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] , A : List[Any] , A : Dict , A : List[Any] ) -> int: """simple docstring""" __snake_case : str = get_videomae_config(A ) if "finetuned" in model_name: __snake_case : List[Any] = VideoMAEForVideoClassification(A ) else: __snake_case : Any = VideoMAEForPreTraining(A ) # download original checkpoint, hosted on Google Drive __snake_case : Optional[int] = 'pytorch_model.bin' gdown.cached_download(A , A , quiet=A ) __snake_case : List[str] = torch.load(A , map_location='cpu' ) if "model" in files: __snake_case : Optional[int] = files['model'] else: __snake_case : Optional[int] = files['module'] __snake_case : Optional[Any] = convert_state_dict(A , A ) model.load_state_dict(A ) model.eval() # verify model on basic input __snake_case : List[str] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) __snake_case : Union[str, Any] = prepare_video() __snake_case : Optional[Any] = image_processor(A , return_tensors='pt' ) if "finetuned" not in model_name: __snake_case : Optional[int] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) __snake_case : int = torch.load(A ) __snake_case : Optional[Any] = model(**A ) __snake_case : List[str] = outputs.logits __snake_case : Optional[int] = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 
'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": __snake_case : Dict = torch.Size([1, 4_00] ) __snake_case : List[Any] = torch.tensor([-0.9291, -0.4061, -0.9307] ) elif model_name == "videomae-small-finetuned-ssv2": __snake_case : Optional[Any] = torch.Size([1, 1_74] ) __snake_case : Tuple = torch.tensor([0.2671, -0.4689, -0.8235] ) elif model_name == "videomae-base": __snake_case : List[str] = torch.Size([1, 14_08, 15_36] ) __snake_case : Optional[int] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] ) elif model_name == "videomae-base-short": __snake_case : Optional[Any] = torch.Size([1, 14_08, 15_36] ) __snake_case : str = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ) # we verified the loss both for normalized and unnormalized targets for this one __snake_case : List[str] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] ) elif model_name == "videomae-large": __snake_case : List[str] = torch.Size([1, 14_08, 15_36] ) __snake_case : Dict = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] ) elif model_name == "videomae-large-finetuned-kinetics": __snake_case : Optional[Any] = torch.Size([1, 4_00] ) __snake_case : Optional[Any] = torch.tensor([0.0771, 0.0011, -0.3625] ) elif model_name == "videomae-huge-finetuned-kinetics": __snake_case : int = torch.Size([1, 4_00] ) __snake_case : Optional[Any] = 
torch.tensor([0.2433, 0.1632, -0.4894] ) elif model_name == "videomae-base-short-finetuned-kinetics": __snake_case : Tuple = torch.Size([1, 4_00] ) __snake_case : Optional[Any] = torch.tensor([0.6588, 0.0990, -0.2493] ) elif model_name == "videomae-base-finetuned-kinetics": __snake_case : Optional[Any] = torch.Size([1, 4_00] ) __snake_case : int = torch.tensor([0.3669, -0.0688, -0.2421] ) elif model_name == "videomae-base-short-ssv2": __snake_case : Tuple = torch.Size([1, 14_08, 15_36] ) __snake_case : Dict = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": __snake_case : Dict = torch.Size([1, 1_74] ) __snake_case : Tuple = torch.tensor([-0.0537, -0.1539, -0.3266] ) elif model_name == "videomae-base-ssv2": __snake_case : List[Any] = torch.Size([1, 14_08, 15_36] ) __snake_case : str = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] ) elif model_name == "videomae-base-finetuned-ssv2": __snake_case : Optional[int] = torch.Size([1, 1_74] ) __snake_case : Any = torch.tensor([0.1961, -0.8337, -0.6389] ) else: raise ValueError(F"""Model name not supported. Should be one of {model_names}""" ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , A , atol=1e-4 ) else: print('Logits:' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , A , atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": __snake_case : Dict = outputs.loss assert torch.allclose(A , A , atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A ) model.save_pretrained(A ) if push_to_hub: print('Pushing to the hub...' 
) model.push_to_hub(A , organization='nielsr' ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''', type=str, help=( '''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct''' ''' download link.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''/Users/nielsrogge/Documents/VideoMAE/Test''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __A = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
"""Binary-tree traversal algorithms: depth-first, breadth-first, and zig-zag.

Fixes the machine-mangled version of this file: every ``def`` had been renamed
to ``_SCREAMING_SNAKE_CASE`` (each definition shadowing the previous one) while
the call sites still used the real names, and local assignments in ``make_tree``
and ``main`` had been destroyed, raising ``NameError`` at runtime.
"""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """A binary-tree node: an integer payload plus optional left/right children."""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the fixed sample tree: 1 -> (2, 3), 2 -> (4, 5)."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Depth-first traversal: root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Depth-first traversal: left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Depth-first traversal: left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal of the node values, using an explicit FIFO queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of one tree level, left to right (levels are 1-based)."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of one tree level, right to left (levels are 1-based)."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            # Visit the right child first so values come out right-to-left.
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zig-zag (spiral) level order: odd levels left-to-right, even levels right-to-left."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the sample tree."""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
61
1
"""M-CTC-T model configuration.

Fixes the machine-mangled version of this module: the ``__init__`` signature had
every parameter collapsed to a duplicate ``__a`` (a SyntaxError), each attribute
store was replaced by a ``__snake_case`` annotation (so no hyperparameter was
ever saved on the config), and the class/base/module-level names were destroyed.
Parameter names are recovered from the assignment order and the default values,
which match ``speechbrain/m-ctc-t-large``.
"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    """Configuration for the M-CTC-T speech-recognition model.

    Holds the transformer-encoder hyperparameters plus the convolutional
    feature-extraction front end settings. Instantiating with no arguments
    yields a configuration in the style of ``speechbrain/m-ctc-t-large``.
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        """Store all hyperparameters and validate the convolutional module shape.

        Raises:
            ValueError: if ``len(conv_kernel)`` does not equal ``num_conv_layers``.
        """
        # Special token ids are forwarded to the base class, which owns them.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
61
"""A small pure-Python linear-algebra library: ``Vector`` and ``Matrix``.

Fixes the machine-mangled version of this module: every method of both classes
had been renamed to the same identifier (so only the last one survived), all
subscript assignments (``ans[pos] = 1``, ``self.__components[pos] = value``,
``minor[i] = ...``) were destroyed, and module-level function names were lost.
Names are restored from the internal call sites (``component``,
``change_component``, ``euclidean_length``, ``zero_vector``, ``minor``,
``cofactor``, ``determinant``, ``width``, ``height``). Also fixes the
copy-pasted error message in ``Matrix.component``.
"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real-valued vector backed by a plain list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        """Create a vector from *components* (empty vector when omitted)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Number of components."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication for numbers, dot product for equal-size vectors."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            products = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(products)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return component *i* (negative indices allowed, as in a list)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set component *pos* to *value*."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; raises on an empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Angle between this vector and *other*, in radians (degrees if *deg*)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index *pos* and 0 elsewhere."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return ``scalar * x + y`` (the classic BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of *n* random integer components drawn from [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A w x h real matrix backed by a list of row lists."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Wrap *matrix* (row-major) with explicit width *w* and height *h*."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render rows as ``|a,b,...|`` lines."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product for vectors, scalar multiplication for numbers."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        """Number of rows."""
        return self.__height

    def width(self) -> int:
        """Number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry at row *x*, column *y*."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            # Fixed: the original message said "change_component" here.
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry at row *x*, column *y* to *value*."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Determinant of the matrix with row *x* and column *y* removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Signed minor: ``(-1) ** (x + y) * minor(x, y)``."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a height x width matrix of random integers drawn from [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
61
1
"""Fast and slow test suites for the diffusers ``VideoToVideoSDPipeline``."""
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNetaDConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


# NOTE(review): identifiers in this file appear machine-mangled. The class name
# `a_`, the undefined base `UpperCamelCase_`, the repeated `_snake_case` class
# attributes, the repeated `SCREAMING_SNAKE_CASE__` method names, and the
# `__snake_case`/`__a` locals each rebind the same name, so only the last
# binding of each survives and many references below (e.g. `unet`, `generator`,
# `self.get_dummy_components`) are undefined at runtime. Recover the original
# names from upstream diffusers before relying on this suite.
@skip_mps
class a_ ( UpperCamelCase_ , unittest.TestCase ):
    # Pipeline under test plus its tester parameter sets; note each `_snake_case`
    # assignment overwrites the previous one (mangling artifact).
    _snake_case = VideoToVideoSDPipeline
    _snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""}) - {"""image""", """width""", """height"""}
    _snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""}) - {"""image"""}
    _snake_case = PipelineTesterMixin.required_optional_params - {"""latents"""}
    _snake_case = False  # No `output_type`.
    _snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ]
    )

    def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
        """Build tiny deterministic pipeline components (UNet, scheduler, VAE, CLIP)."""
        torch.manual_seed(0)
        __snake_case : int = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4, 6_4, 6_4),
            layers_per_block=2,
            sample_size=3_2,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),
            up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),
            cross_attention_dim=3_2,
            attention_head_dim=4,
        )
        # NOTE(review): `clip_sample=__a` / `set_alpha_to_one=__a` reference an
        # undefined name — presumably `False` in the original; confirm upstream.
        __snake_case : Optional[Any] = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=__a,
            set_alpha_to_one=__a,
        )
        torch.manual_seed(0)
        __snake_case : Dict = AutoencoderKL(
            block_out_channels=[3_2, 6_4],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=1_2_8,
        )
        torch.manual_seed(0)
        __snake_case : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=3_2,
            intermediate_size=3_7,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_0_0_0,
            hidden_act='gelu',
            projection_dim=5_1_2,
        )
        __snake_case : int = CLIPTextModel(__a)
        __snake_case : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # The dict keys reference locals that the mangling destroyed.
        __snake_case : Optional[Any] = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def SCREAMING_SNAKE_CASE__ (self, __a, __a=0) -> Tuple:
        """Build a deterministic 3-frame 32x32 input video plus call kwargs."""
        __snake_case : Optional[int] = floats_tensor((1, 3, 3, 3_2, 3_2), rng=random.Random(__a)).to(__a)
        # MPS does not support per-device generators, hence the CPU fallback.
        if str(__a).startswith('mps'):
            __snake_case : int = torch.manual_seed(__a)
        else:
            __snake_case : List[Any] = torch.Generator(device=__a).manual_seed(__a)
        __snake_case : List[Any] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def SCREAMING_SNAKE_CASE__ (self) -> Any:
        """Run the pipeline end to end on CPU and check a fixed output slice."""
        __snake_case : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __snake_case : str = self.get_dummy_components()
        __snake_case : List[Any] = VideoToVideoSDPipeline(**__a)
        __snake_case : Tuple = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        __snake_case : Dict = self.get_dummy_inputs(__a)
        __snake_case : Dict = 'np'
        __snake_case : Tuple = sd_pipe(**__a).frames
        __snake_case : Any = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (3_2, 3_2, 3)
        __snake_case : List[str] = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
        """Check xformers attention produces near-identical results (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a, expected_max_diff=5E-3)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Batched-inference consistency test, skipped for this pipeline."""
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Single-vs-batched identity test, skipped for this pipeline."""
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """`num_images_per_prompt` test, unsupported for this pipeline."""
        pass

    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """Delegate the progress-bar test to the mixin implementation."""
        return super().test_progress_bar()


@slow
@skip_mps
class a_ ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """Run the real zeroscope_v2_XL checkpoint on a random 10-frame clip (GPU)."""
        __snake_case : Optional[int] = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.floataa)
        pipe.enable_model_cpu_offload()
        # 10 frames
        __snake_case : List[Any] = torch.Generator(device='cpu').manual_seed(0)
        __snake_case : List[str] = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6), generator=__a)
        __snake_case : Dict = video.to('cuda')
        __snake_case : Optional[int] = 'Spiderman is surfing'
        __snake_case : List[str] = pipe(__a, video=__a, generator=__a, num_inference_steps=3, output_type='pt').frames
        __snake_case : Tuple = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1E-2
61
"""Sanity tests for transformers' top-level imports, ``ContextManagers``, and ``find_labels``."""
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


# NOTE(review): the constant names below were all mangled to `__A`, so each
# assignment overwrites the previous one, and the `__a` arguments in the tests
# reference an undefined name (originally the Bert model classes above).
# Recover the distinct original names from upstream transformers.
__A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

__A = '''main'''  # Default branch name
__A = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
__A = '''aaaaaaa'''
# This commit does not exist, so we should 404.

__A = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes

__A = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''


@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Context manager that prints an English greeting around its body."""
    print('Welcome!')
    yield
    print('Bye!')


@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    """Context manager that prints a French greeting around its body.

    NOTE(review): this shadows the English one above — both were originally
    distinct functions (presumably ``context_en`` / ``context_fr``, which the
    tests below still call); confirm upstream.
    """
    print('Bonjour!')
    yield
    print('Au revoir!')


class a_ ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Importing transformers should expose a resolvable module spec."""
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None


class a_ ( unittest.TestCase ):
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__ (self, __a) -> int:
        """``ContextManagers([])`` should wrap its body with no extra output."""
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__ (self, __a) -> List[str]:
        """A single context manager should bracket the body's output."""
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def SCREAMING_SNAKE_CASE__ (self, __a) -> Tuple:
        """Stacked context managers should nest outermost-first."""
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
        """``find_labels`` should report the label argument names of PyTorch models."""
        self.assertEqual(find_labels(__a), ['labels'])
        self.assertEqual(find_labels(__a), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a), ['start_positions', 'end_positions'])

        class a_ ( UpperCamelCase_ ):
            pass

        self.assertEqual(find_labels(__a), ['labels'])

    @require_tf
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """``find_labels`` should report the label argument names of TF models."""
        self.assertEqual(find_labels(__a), ['labels'])
        self.assertEqual(find_labels(__a), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(__a), ['start_positions', 'end_positions'])

        class a_ ( UpperCamelCase_ ):
            pass

        self.assertEqual(find_labels(__a), ['labels'])

    @require_flax
    def SCREAMING_SNAKE_CASE__ (self) -> int:
        """Flax models take no label arguments, so ``find_labels`` returns []."""
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])
        self.assertEqual(find_labels(__a), [])

        class a_ ( UpperCamelCase_ ):
            pass

        self.assertEqual(find_labels(__a), [])
61
1
"""Graph m-coloring via backtracking.

Fixes the machine-mangled version of this file: the vertex-color assignment
and the backtracking reset had both been replaced by ``__snake_case`` dummy
assignments (so the coloring was never written), and the function/parameter
names were destroyed. Names ``valid_coloring`` and ``util_color`` are restored
from the surviving call sites.
"""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True when no already-colored neighbour uses *color*.

    ``neighbours`` is one adjacency-matrix row: ``neighbours[i] == 1`` means
    vertex *i* is adjacent to the vertex being colored.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Recursively color ``graph`` from vertex *index* on, using at most *max_colors*.

    Mutates ``colored_vertices`` in place; returns True when a full valid
    coloring was found (assignments are left in ``colored_vertices``).
    """
    # Base Case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive Step: try each color on the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring with at most *max_colors* colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
61
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
1
"""Convert OpenAI Jukebox checkpoints to the HuggingFace Transformers format.

Downloads the original ``.pth.tar`` checkpoints, renames every weight key to
the Transformers naming scheme, loads them into a ``JukeboxModel`` and saves
the converted model plus a ``mapping.json`` recording new-key -> old-key pairs.

NOTE(review): this block was recovered from name-obfuscated source; local
names were reconstructed from the retained call sites — verify against the
upstream ``convert_jukebox.py`` conversion script.
"""
import argparse
import json
import os
import re
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    """Apply the simple (string-based) key renames of the conversion.

    Regex-based structural renames are handled by ``fix_jukebox_keys``;
    this helper only rewrites fixed substrings/suffixes.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of ``state_dict`` to the Transformers naming scheme.

    Args:
        state_dict: the original (OpenAI) weight dict for one sub-model.
        model_state_dict: state dict of the freshly built ``JukeboxModel``,
            used to validate that each renamed key exists with a matching shape.
        key_prefix: prefix of this sub-model inside the full model
            (``"vqvae"`` or ``"priors.N"``).
        mapping: dict updated in place with ``new_key -> original_key`` pairs.

    Returns:
        A new dict with renamed keys and the original tensors as values.
    """
    new_dict = {}
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key  # keep the weight under its old name instead of corrupting the model

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download (if needed) and convert all checkpoints of ``model_name``.

    Saves the converted model and a ``mapping.json`` to
    ``pytorch_dump_folder_path`` and returns the list of converted prior
    state dicts.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # checkpoint 0 is the VQ-VAE; checkpoints 1..3 are the priors (top-down)
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
61
"""Project Euler problem 20: sum of the digits of 100!."""


def factorial(num: int) -> int:
    """Return ``num`` factorial, computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the digit sum of ``num!`` (defaults to 100!)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
61
1
"""Configuration class for the SEW speech model."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class a_(PretrainedConfig):
    """Configuration for a SEW model.

    NOTE(review): recovered from name-obfuscated source — parameter names were
    reconstructed from the attribute-assignment order; verify against the
    upstream ``SEWConfig``.
    """

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv_* lists describe the same stack of feature-extractor
        # layers, so they must all have the same length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Product of all conv strides: input frames per output logit."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
700
"""Tests for the ChineseCLIP image processor.

NOTE(review): recovered from name-obfuscated source; parameter, attribute and
test-method names were reconstructed from the retained attribute reads —
verify against the upstream ChineseCLIP image-processing test module.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and random image batches for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images, numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB, so 3 channels come out
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
61
0
"""Sum of an arithmetic series: S = n/2 * (2a + (n - 1)d)."""


def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of ``num_of_terms`` terms of the arithmetic series
    starting at ``first_term`` with common difference ``common_diff``.
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    """Demo: sum of the first ten natural numbers."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
"""Configuration class for the ViT-MSN vision model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class a_(PretrainedConfig):
    """Configuration for a ViT-MSN model.

    NOTE(review): recovered from name-obfuscated source — parameter names were
    reconstructed from the attribute-assignment order; verify against the
    upstream ``ViTMSNConfig``.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
61
0
"""Dummy objects raising a helpful error when torch/transformers/onnx are missing.

NOTE(review): recovered from name-obfuscated source in which all six class
names were erased. The names below follow the standard diffusers
``dummy_torch_and_transformers_and_onnx_objects`` module — verify against the
real ``__init__`` that re-exports them.
"""
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
702
"""Net present value of a series of cash flows."""


def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the present value of ``cash_flows`` discounted at ``discount_rate``.

    Cash flow ``i`` (0-based) is discounted by ``(1 + discount_rate) ** i``,
    i.e. the first cash flow occurs now and is not discounted.

    Raises:
        ValueError: if ``discount_rate`` is negative or ``cash_flows`` is empty.
    """
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
61
0
"""Binary search: bisection helpers, iterative and recursive search."""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which ``item`` can be inserted in
    ``sorted_collection`` while keeping it sorted. ``hi < 0`` means "end of list".
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost insertion index for ``item`` (after equal elements)."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` before any equal elements, keeping the list sorted."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert ``item`` after any equal elements, keeping the list sorted."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return the index of ``item`` or ``None``."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard-library ``bisect`` module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over ``sorted_collection[left:right + 1]``."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f'{target} was not found in {collection}.')
    else:
        print(f'{target} was found at position {result} in {collection}.')
703
"""Lazy-import ``__init__`` for the DistilBERT model family.

Builds ``_import_structure`` conditionally on the available backends and
installs a ``_LazyModule`` so submodules are only imported on first access.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
61
0
"""User-activity forecasting with three voters (OLS, SARIMAX, SVR).

NOTE(review): recovered from name-obfuscated source; local names were
reconstructed from the retained expressions — verify against the upstream
forecasting example.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch) -> float:
    """Predict the next user count with an ordinary-least-squares fit on
    [1, date, match] features (normal-equation solution).
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # beta = (X^T X)^-1 X^T y
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user, train_match, test_match) -> float:
    """Predict the next user count with a seasonal ARIMA model (weekly season)."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user) -> float:
    """Predict the next user count with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user) -> float:
    """Return a lower safety limit derived from the IQR of ``train_user``.

    Note: sorts ``train_user`` in place.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result) -> bool:
    """Vote each prediction as safe/unsafe against ``actual_result``.

    A vote is "safe" when it does not exceed the actual value and its absolute
    difference is within 0.1. Returns True when safe votes outnumber unsafe.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=['total_user', 'total_even', 'days'])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    # NOTE(review): tst_user is a one-element list; presumably the intent is
    # tst_user[0] — confirm against the upstream example before changing.
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
704
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# BUG FIX: the path must be bound to a name so it can be inserted into sys.path.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register shared command-line options (e.g. --make-reports).

    BUG FIX: both hooks were defined under one name, so pytest never found them;
    they must carry the canonical pytest hook names.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended report files when --make-reports is set."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
61
0
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging

logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper around several `ControlNetModel`s so they behave as one model: each
    net is run with its own conditioning image and scale, and the resulting
    residuals are summed.  (Mangled parameter names restored — the bodies
    referenced undefined identifiers.)
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        # ModuleList so the sub-nets are registered and moved with the wrapper.
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run every controlnet on its (image, scale) pair and sum the residuals."""
        for i, (image, scale, controlnet) in enumerate(
            zip(controlnet_cond, conditioning_scale, self.nets)
        ):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """Save sub-nets under ``save_directory``, ``save_directory_1``, ``..._2``, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Load controlnets from ``path``, ``path_1``, ... until a directory is missing."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
705
# Lazy import structure for the BioGPT model (mangled `__A` assignments restored:
# the structure dict, its torch extension, and the sys.modules hook were all
# assigned to a throwaway name, leaving `_import_structure` undefined).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
61
0
class DisjointSet:
    """Union-find over elements 0..n-1 with union by rank.

    ``set_counts[i]`` is the initial size of element i's singleton set;
    ``max_set`` tracks the size of the largest set produced so far.
    (Restored from mangled source: both methods shared one name while the
    bodies called ``self.get_parent``, and parameters were undefined.)
    """

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        # Each element starts as the root of its own set.
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing ``src`` and ``dst``.

        Returns False when they are already in the same set.  The smaller-rank
        root is attached under the larger-rank root and the absorbed root's
        count is zeroed out.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Return the root of ``disj_set``'s set, compressing the path.

        BUG FIX: the mangled source discarded the recursive result into a
        temporary and returned the *immediate* parent, which is not the root.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
706
from packaging import version

from .import_utils import is_accelerate_available

if is_accelerate_available():
    import accelerate


def _SCREAMING_SNAKE_CASE(method):
    """Wrap *method* so an attached accelerate offload hook's ``pre_forward``
    runs first (reloading offloaded weights before the forward pass).

    Returns the method unchanged when accelerate is missing or older than
    0.17.0, where the hook API is unavailable.
    """
    # Guard clauses: fall back to the raw method when hooks cannot exist.
    if not is_accelerate_available():
        return method

    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        hook = getattr(self, "_hf_hook", None)
        if hook is not None and hasattr(hook, "pre_forward"):
            hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
61
0
from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Image processor: resize -> center-crop -> rescale -> normalize.

    Reconstructed from mangled source whose signatures repeated the parameter
    ``__a`` (a SyntaxError) and whose bodies referenced undefined names.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image.

        If ``size`` has a ``shortest_edge`` key the shortest edge is resized to
        ``int(256/224 * shortest_edge)`` keeping aspect ratio (ImageNet-style
        eval resize before a 224 crop); otherwise ``height``/``width`` are used.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(
                image, size=shortest_edge, default_to_square=False
            )
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image,
            size=(size_dict["height"], size_dict["width"]),
            resample=resample,
            data_format=data_format,
            **kwargs,
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(
            image, size=(size["height"], size["width"]), data_format=data_format, **kwargs
        )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch.

        Per-call arguments override the instance defaults; ``None`` keeps them.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
707
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available

if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    """Exact-output tests for the text-to-speech tool.

    BUG FIX: all three methods shared one mangled name, so only the last
    definition survived and unittest discovered no tests; restored to the
    ``setUp``/``test_*`` names unittest actually invokes.
    """

    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # Fixed seed makes the generated waveform deterministic.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor(
                    [-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]
                ),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor(
                    [-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]
                ),
            )
        )
61
0
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """OpenAI GPT tokenizer tests.

    BUG FIX: the four class attributes all shared the mangled name
    ``_snake_case`` (only the last survived), and the test methods shared one
    name; restored to the attribute/method names the mixin and unittest expect.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(
                    ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length"
                )

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length"
                )

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # The tokenizer has no padding token, so the shared mixin test does not apply.
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy + spacy preprocessing available."""

    pass
708
import math


class Graph:
    """All-pairs shortest paths via Floyd-Warshall on nodes 0..n-1.

    BUG FIX: the mangled source referenced an undefined ``n`` in ``__init__``
    and gave every method the same name, while the demo below calls
    ``Graph``/``add_edge``/``floyd_warshall``/``show_min`` — names restored.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k (O(n^3))."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the minimum distance from u to v (math.inf if unreachable)."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
61
0
import inspect
import re
# BUG FIX: the hash function is sha256; the mangled source imported the
# nonexistent name `shaaaa`.
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Return a sha256 hex digest of the source lines with comments and blank
    lines stripped, used to detect changes in packaged builder modules."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
709
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    """ConvNeXT-style preprocessing: crop_pct-aware resize, rescale, normalize.

    Reconstructed from mangled source whose signatures repeated the parameter
    ``__a`` (a SyntaxError).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image.

        Below 384 px the shortest edge is resized to ``shortest_edge/crop_pct``
        preserving aspect ratio and then center-cropped; at 384 or larger the
        image is warped to a square without cropping.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(
                image, size=resize_shortest_edge, default_to_square=False
            )
            image = resize(
                image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs
            )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(
                image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs
            )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image,
                size=(shortest_edge, shortest_edge),
                resample=resample,
                data_format=data_format,
                **kwargs,
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Apply the configured pipeline to one image or a batch; per-call
        arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # BUG FIX: original `do_resize and size is None or resample is None`
        # raised whenever resample was None even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample)
                for image in images
            ]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std) for image in images
            ]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
61
0
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self , __a) -> Tuple: """simple docstring""" __snake_case : List[str] = 3 __snake_case : Tuple = 2_5_0 __snake_case : str = ids_tensor((batch_size, length) , UpperCamelCase_) __snake_case : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float) / length return input_ids, scores def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = self._get_tensors(5) __snake_case : Tuple = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0), MaxTimeCriteria(max_time=0.1), ]) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : int = self._get_tensors(9) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : Optional[int] = self._get_tensors(1_0) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_)) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : int = MaxLengthCriteria(max_length=1_0) __snake_case : Tuple = self._get_tensors(5) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : Dict = self._get_tensors(9) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : Optional[int] = self._get_tensors(1_0) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_)) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5) __snake_case : List[str] = self._get_tensors(5) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) 
__snake_case : Dict = self._get_tensors(9) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : Optional[Any] = self._get_tensors(1_0) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : Union[str, Any] = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length , 1_0) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : Optional[Any] = self._get_tensors(5) __snake_case : str = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_)) __snake_case : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_)) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]) , 1_0) with self.assertWarns(UpperCamelCase_): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]) , 1_1) __snake_case : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 1_1) self.assertEqual(len(UpperCamelCase_) , 1)
710
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
0
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( A : list[int] , A : int ) -> Union[str, Any]: """simple docstring""" if len(_lowerCAmelCase ) < k or k < 0: raise ValueError('Invalid Input' ) __snake_case : Union[str, Any] = sum(array[:k] ) for i in range(len(_lowerCAmelCase ) - k ): __snake_case : Dict = current_sum - array[i] + array[i + k] __snake_case : Any = max(_lowerCAmelCase , _lowerCAmelCase ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() __A = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)] __A = randint(0, 1_1_0) print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
711
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() 
torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
0
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class a_ : def __init__(self) -> Optional[int]: """simple docstring""" __snake_case : int = {} def SCREAMING_SNAKE_CASE__ (self , __a) -> None: """simple docstring""" __snake_case : List[str] = {} def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if nodea not in self.connections: self.add_node(_a) if nodea not in self.connections: self.add_node(_a) __snake_case : int = probability def SCREAMING_SNAKE_CASE__ (self) -> list[str]: """simple docstring""" return list(self.connections) def SCREAMING_SNAKE_CASE__ (self , __a) -> str: """simple docstring""" __snake_case : int = 0 __snake_case : List[Any] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : Optional[int] , A : Dict ) -> dict[str, int]: """simple docstring""" __snake_case : Any = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __snake_case : str = Counter(graph.get_nodes() ) __snake_case : Optional[Any] = start for _ in range(__lowerCAmelCase ): __snake_case : Optional[Any] = graph.transition(__lowerCAmelCase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
712
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": 
accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) 
# Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
0
'''simple docstring''' from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class a_ ( _snake_case ): @slow @require_torch def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : int = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny') __snake_case : Tuple = BertTokenizer.from_pretrained('bert-base-uncased') __snake_case : Dict = bertabert.config.encoder.vocab_size __snake_case : int = tokenizer.sep_token_id __snake_case : List[str] = tokenizer.cls_token_id __snake_case : List[str] = 1_2_8 __snake_case : str = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]') __snake_case : Optional[Any] = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]') __snake_case : List[Any] = train_dataset.select(range(3_2)) __snake_case : List[Any] = val_dataset.select(range(1_6)) __snake_case : Union[str, Any] = 4 def _map_to_encoder_decoder_inputs(__a): # Tokenizer will automatically set [BOS] <text> [EOS] __snake_case : Optional[int] = tokenizer(batch['article'] , padding='max_length' , truncation=__a , max_length=5_1_2) __snake_case : Dict = tokenizer(batch['highlights'] , padding='max_length' , truncation=__a , max_length=1_2_8) __snake_case : List[str] = inputs.input_ids __snake_case : Optional[int] = inputs.attention_mask __snake_case : List[str] = outputs.input_ids __snake_case : Optional[int] = outputs.input_ids.copy() __snake_case : Dict = [ [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] __snake_case : Union[str, Any] = outputs.attention_mask assert all(len(__a) == 5_1_2 for x in inputs.input_ids) assert all(len(__a) == 1_2_8 for x in outputs.input_ids) return batch def _compute_metrics(__a): 
__snake_case : int = pred.label_ids __snake_case : Optional[Any] = pred.predictions # all unnecessary tokens are removed __snake_case : List[Any] = tokenizer.batch_decode(__a , skip_special_tokens=__a) __snake_case : Tuple = tokenizer.batch_decode(__a , skip_special_tokens=__a) __snake_case : List[str] = sum([int(pred_str[i] == label_str[i]) for i in range(len(__a))]) / len(__a) return {"accuracy": accuracy} # map train dataset __snake_case : Dict = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__a , batch_size=__a , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset __snake_case : Optional[Any] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__a , batch_size=__a , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Optional[Any] = SeqaSeqTrainingArguments( output_dir=__a , per_device_train_batch_size=__a , per_device_eval_batch_size=__a , predict_with_generate=__a , evaluation_strategy='steps' , do_train=__a , do_eval=__a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer __snake_case : int = SeqaSeqTrainer( model=__a , args=__a , compute_metrics=_compute_metrics , train_dataset=__a , eval_dataset=__a , tokenizer=__a , ) # start training trainer.train()
713
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : Optional[Any] , A : Tuple , A : List[str] , A : List[str] ) -> List[Any]: # Return True if there is node that has not iterated. __snake_case : Any = [False] * len(lowerCAmelCase__ ) __snake_case : Any = [] queue.append(lowerCAmelCase__ ) __snake_case : Dict = True while queue: __snake_case : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowerCAmelCase__ ) __snake_case : str = True __snake_case : Optional[Any] = u return visited[t] def _SCREAMING_SNAKE_CASE ( A : Dict , A : Dict , A : str ) -> Optional[Any]: # This array is filled by BFS and to store path __snake_case : Dict = [-1] * (len(lowerCAmelCase__ )) __snake_case : List[str] = 0 while bfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): __snake_case : Optional[Any] = float('Inf' ) __snake_case : Optional[Any] = sink while s != source: # Find the minimum value in select path __snake_case : List[str] = min(lowerCAmelCase__ , graph[parent[s]][s] ) __snake_case : str = parent[s] max_flow += path_flow __snake_case : Optional[int] = sink while v != source: __snake_case : Tuple = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __snake_case : List[str] = parent[v] return max_flow __A = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __A , __A = 0, 5 print(ford_fulkerson(graph, source, sink))
714
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." 
__snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( 
names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
0
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _SCREAMING_SNAKE_CASE ( A : Any , A : Optional[Any] , A : Union[str, Any]=None , A : int=None , A : Union[str, Any]=None , A : List[Any]=None , A : Tuple=None , A : Union[str, Any]=None , ) -> int: """simple docstring""" if attention_mask is None: __snake_case : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __snake_case : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __snake_case : str = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __snake_case : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __snake_case : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class a_ : def __init__(self , __a , __a=1_3 , __a=7 , __a=True , __a=False , __a=9_9 , __a=1_6 , __a=2 , __a=4 , __a=4 , 
__a="gelu" , __a=0.1 , __a=0.1 , __a=3_2 , __a=2 , __a=1 , __a=0 , __a=0.02 , ) -> List[str]: """simple docstring""" __snake_case : int = parent __snake_case : Optional[Any] = batch_size __snake_case : Union[str, Any] = seq_length __snake_case : Union[str, Any] = is_training __snake_case : List[str] = use_labels __snake_case : Tuple = vocab_size __snake_case : List[str] = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : Any = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Any = max_position_embeddings __snake_case : List[Any] = eos_token_id __snake_case : List[Any] = pad_token_id __snake_case : List[Any] = bos_token_id __snake_case : Tuple = initializer_range def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size) __snake_case : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1) __snake_case : List[Any] = shift_tokens_right(_UpperCamelCase , 1 , 2) __snake_case : Optional[int] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , ) __snake_case : Union[str, Any] = 
prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) return config, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Any: """simple docstring""" __snake_case : Optional[int] = 2_0 __snake_case : Tuple = model_class_name(_UpperCamelCase) __snake_case : Any = model.encode(inputs_dict['input_ids']) __snake_case : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __snake_case : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase) __snake_case : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4') __snake_case : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __snake_case : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) __snake_case : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4') __snake_case : int = model.decode( decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , ) __snake_case : Dict = model.decode(_UpperCamelCase , _UpperCamelCase) __snake_case : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Any: """simple docstring""" __snake_case : Optional[Any] = 2_0 __snake_case : Tuple = model_class_name(_UpperCamelCase) __snake_case : Any = model.encode(inputs_dict['input_ids']) __snake_case : 
List[Any] = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __snake_case : List[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ] , axis=-1 , ) __snake_case : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase) __snake_case : Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __snake_case : Dict = model.decode( decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) __snake_case : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4') __snake_case : int = model.decode( decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , ) __snake_case : Tuple = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase) __snake_case : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""") @require_flax class a_ ( unittest.TestCase ): _snake_case = 99 def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : int = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) 
__snake_case : List[Any] = input_ids.shape[0] __snake_case : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : str = self._get_config_and_data() __snake_case : Any = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase) __snake_case : List[Any] = lm_model(input_ids=_UpperCamelCase) __snake_case : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _UpperCamelCase) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Tuple = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) __snake_case : Optional[int] = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase) __snake_case : int = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa) __snake_case : Tuple = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa) __snake_case : Dict = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase) __snake_case : Any = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _UpperCamelCase) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : List[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa) __snake_case : int = shift_tokens_right(_UpperCamelCase , 1 , 2) __snake_case : Tuple = np.equal(_UpperCamelCase , 1).astype(np.floataa).sum() __snake_case : Dict = 
np.equal(_UpperCamelCase , 1).astype(np.floataa).sum() self.assertEqual(shifted.shape , input_ids.shape) self.assertEqual(_UpperCamelCase , n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0] , 2).all()) @require_flax class a_ ( __lowerCAmelCase , unittest.TestCase , __lowerCAmelCase ): _snake_case = True _snake_case = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) _snake_case = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Any = FlaxBlenderbotModelTester(self) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __snake_case : Tuple = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase) __snake_case : List[str] = model_class(_UpperCamelCase) @jax.jit def encode_jitted(__a , __a=None , **__a): return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase) with self.subTest('JIT Enabled'): __snake_case : Dict = encode_jitted(**_UpperCamelCase).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): __snake_case : str = encode_jitted(**_UpperCamelCase).to_tuple() 
self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase)) for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(jitted_output.shape , output.shape) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __snake_case : Any = model_class(_UpperCamelCase) __snake_case : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask']) __snake_case : Union[str, Any] = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__a , __a , __a): return model.decode( decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , ) with self.subTest('JIT Enabled'): __snake_case : Dict = decode_jitted(**_UpperCamelCase).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): __snake_case : Optional[int] = decode_jitted(**_UpperCamelCase).to_tuple() self.assertEqual(len(_UpperCamelCase) , len(_UpperCamelCase)) for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase): self.assertEqual(jitted_output.shape , output.shape) @slow def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained('facebook/blenderbot-400M-distill') # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __snake_case : Dict = np.ones((1, 1)) * model.config.eos_token_id __snake_case : int = model(_UpperCamelCase) self.assertIsNotNone(_UpperCamelCase) @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.') @slow def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : Any = {"""num_beams""": 1, """early_stopping""": 
True, """min_length""": 1_5, """max_length""": 2_5} __snake_case : Dict = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} __snake_case : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_UpperCamelCase) __snake_case : Union[str, Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B') __snake_case : List[Any] = ["""Sam"""] __snake_case : Dict = tokenizer(_UpperCamelCase , return_tensors='jax') __snake_case : Optional[int] = model.generate(**_UpperCamelCase , **_UpperCamelCase) __snake_case : Dict = """Sam is a great name. It means \"sun\" in Gaelic.""" __snake_case : Union[str, Any] = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase) assert generated_txt[0].strip() == tgt_text
715
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride 
__snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = 
TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and 
output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, 
self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 
'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , 
dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) 
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
0
"""Class-conditional image generation pipeline using a Diffusion Transformer (DiT)."""
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a Transformer backbone.

    Args:
        transformer: class-conditioned transformer that denoises image latents.
        vae: variational auto-encoder used to decode latents into images.
        scheduler: scheduler used together with ``transformer`` to denoise latents.
        id2label: optional mapping from class id to a comma-separated label string
            (e.g. ImageNet labels); enables :meth:`get_label_ids`.
    """

    def __init__(
        self,
        transformer: TransformeraDModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # Build a label-string -> class-id dictionary for easier use.
        # Each id may map to several comma-separated synonyms; every synonym
        # becomes its own key pointing at the same integer id.
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            # Sort for deterministic, readable error messages.
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map one or more label strings to their class ids.

        Raises:
            ValueError: if any label is not present in ``self.labels``.
        """
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the denoising loop and decode the result.

        Args:
            class_labels: list of class ids to condition generation on.
            guidance_scale: classifier-free guidance weight; values > 1 enable guidance.
            generator: optional RNG(s) for deterministic latents.
            num_inference_steps: number of scheduler steps.
            output_type: "pil" for PIL images, anything else returns the numpy array.
            return_dict: if False, return a plain tuple instead of ImagePipelineOutput.
        """
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        # With guidance the batch is doubled: conditional half + unconditional half.
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        # 1000 is the "null" (unconditional) class id used for classifier-free guidance.
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                # Both halves must share the same latents; re-duplicate the first half.
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # MPS has no float64/int64, so pick the widest dtype the device supports.
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform classifier-free guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # DiT predicts a learned sigma alongside epsilon; strip it off here.
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
716
"""Project Euler 30: find all numbers equal to the sum of fifth powers of their digits."""

# Fifth power of every decimal digit, keyed by the digit's character,
# so that summing over str(number) avoids re-computing powers.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum all numbers in [1000, 1_000_000) equal to their fifth-power digit sum.

    1_000_000 is a safe upper bound: 7 * 9**5 = 413_343 < 1_000_000, so no
    number with seven or more digits can equal its fifth-power digit sum.
    Single-, two- and three-digit numbers are excluded by the problem statement
    (a one-digit sum is not considered a "sum").
    """
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
61
0
"""CPU-only smoke tests that run the accelerate test scripts in-process."""
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class a_(unittest.TestCase):
    """Launch the bundled accelerate test entry points via ``debug_launcher``.

    The original block defined both tests under the same method name, so the
    second definition shadowed the first and only one test ever ran; distinct
    ``test_*`` names restore both.
    """

    def test_can_launch_training_script(self) -> None:
        # End-to-end training script must complete without raising.
        debug_launcher(test_script.main)

    def test_can_launch_ops_script(self) -> None:
        # Collective-ops script must complete without raising.
        debug_launcher(test_ops.main)
717
"""Binary tree traversal algorithms: pre/in/post-order, level order and zigzag."""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """A binary-tree node holding a value and optional left/right children."""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""Build the fixed sample tree used by :func:`main`::

            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root -> left subtree -> right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree -> right subtree -> root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree -> root -> right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path (0 for an empty tree)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal returning node values level by level."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values of the nodes at ``level`` (1-based, root is level 1), left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values of the nodes at ``level`` (1-based), right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            # Visit right first so the level reads right-to-left.
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zigzag (spiral) level-order traversal: direction alternates per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the sample tree."""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
61
0
"""Utilities to temporarily patch a submodule attribute (e.g. ``os.path.join``)
as seen from another module, restoring the original on exit."""
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Lightweight stand-in for a module.

    Copies the wrapped module's attributes onto itself so that individual ones
    can be overwritten without touching the real module object.
    """

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                # Copy everything public, plus any dunder explicitly requested.
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        # Remember the real module so nested patches can recognise this wrapper.
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Temporarily replace ``obj.<target>`` (a dotted path such as
    ``"os.path.join"``) with ``new``.

    Usable as a context manager, or imperatively via ``start()``/``stop()``.
    """

    _active_patches = []  # patches activated via start(), for bulk teardown

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target  # dotted path, e.g. "os.path.join"
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}  # attr name -> original value, restored on exit
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it'a s builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        # Restore every attribute saved in __enter__.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch and keep it on the class-level stack."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Deactivate a patch previously activated with ``start``."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
718
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... 
def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def 
_SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int = w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... 
def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out 
of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
61
0
"""Deprecated GLUE/XNLI metric helpers kept for backward compatibility.

Every function emits a FutureWarning pointing to the 🤗 Evaluate library
before computing its metric with scikit-learn/scipy.
"""
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    """Accuracy, binary F1, and their mean."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    """Pearson/Spearman correlations and their mean (used for STS-B)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    """Dispatch to the metric used by each GLUE task.

    Raises KeyError for an unknown task name.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    """XNLI accuracy; ValueError on length mismatch, KeyError on unknown task."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
719
"""Tests for transformers' import mechanics and generic utilities
(``ContextManagers``, ``find_labels``) across PT/TF/Flax backends."""
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"  # Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"  # One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"  # This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"  # Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# NOTE(review): presumably the sha-256 of a file at the top of `main` — confirm which file.


# Context managers exercised by GenericUtilTests below.
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib can't import the package dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels must also work on user subclasses of model classes.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
61
0
"""Convert a LUKE checkpoint (original research-repo format) into a
Hugging Face ``LukeModel`` + ``LukeTokenizer``."""
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Load the original weights, adapt them to the HF layout, sanity-check the
    outputs against reference slices, and save the model and tokenizer.

    Raises ValueError when unexpected/missing state-dict keys or output
    mismatches are detected.
    """
    # Load configuration defined in the metadata file.
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from the "@" and "#" tokens.
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # Bug fix: the comparison used to be `!= expected_shape`, which raised
    # exactly when the shape was CORRECT; a mismatch now triggers the error.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocab (one "<title>\\t<id>" per line) into a
    {title: line_index} mapping."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
720
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
"""Tests for the PyTorch CTRL model (base model, LM head, sequence classification)."""
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    """Builds tiny CTRL configs/inputs and runs shape checks for each head."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as the padding token in these tests.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Random tiny inputs plus a config; returns the tuple consumed by the checks."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        # Smoke-test optional-argument combinations, then check the bare call.
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
721
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : str = 1 for i in range(1 , num + 1 ): fact *= i return fact def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" __snake_case : Union[str, Any] = 0 while number > 0: __snake_case : Dict = number % 10 sum_of_digits += last_digit __snake_case : Union[str, Any] = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( A : int = 1_00 ) -> int: """simple docstring""" __snake_case : List[Any] = factorial(A ) __snake_case : Dict = split_and_add(A ) return result if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
61
0
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def _SCREAMING_SNAKE_CASE ( A : List[Any] , A : Any , A : int=1e-12 ) -> str: """simple docstring""" __snake_case : int = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T __snake_case : Optional[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(snake_case__ , axis=1 ) , a_min=snake_case__ ) ).T return jnp.matmul(snake_case__ , norm_emb_a.T ) class a_ ( nn.Module ): _snake_case = 42 _snake_case = jnp.floataa def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : List[Any] = FlaxCLIPVisionModule(self.config.vision_config) __snake_case : List[str] = nn.Dense(self.config.projection_dim , use_bias=A_ , dtype=self.dtype) __snake_case : Tuple = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim)) __snake_case : str = self.param( 'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim)) __snake_case : Tuple = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,)) __snake_case : str = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,)) def __call__(self , __a) -> Optional[Any]: """simple docstring""" __snake_case : Tuple = self.vision_model(A_)[1] __snake_case : Tuple = self.visual_projection(A_) __snake_case : Dict = jax_cosine_distance(A_ , self.special_care_embeds) __snake_case : Optional[Any] = jax_cosine_distance(A_ , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __snake_case : Optional[int] = 0.0 __snake_case : List[Any] = special_cos_dist - self.special_care_embeds_weights[None, :] + 
adjustment __snake_case : Dict = jnp.round(A_ , 3) __snake_case : str = jnp.any(special_scores > 0 , axis=1 , keepdims=A_) # Use a lower threshold if an image has any special care concept __snake_case : List[str] = is_special_care * 0.01 __snake_case : Optional[Any] = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __snake_case : Tuple = jnp.round(A_ , 3) __snake_case : Dict = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class a_ ( _lowercase ): _snake_case = CLIPConfig _snake_case = '''clip_input''' _snake_case = FlaxStableDiffusionSafetyCheckerModule def __init__(self , __a , __a = None , __a = 0 , __a = jnp.floataa , __a = True , **__a , ) -> int: """simple docstring""" if input_shape is None: __snake_case : Any = (1, 2_2_4, 2_2_4, 3) __snake_case : str = self.module_class(config=A_ , dtype=A_ , **A_) super().__init__(A_ , A_ , input_shape=A_ , seed=A_ , dtype=A_ , _do_init=_do_init) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None) -> FrozenDict: """simple docstring""" __snake_case : Optional[int] = jax.random.normal(A_ , A_) __snake_case ,__snake_case : Union[str, Any] = jax.random.split(A_) __snake_case : List[Any] = {'params': params_rng, 'dropout': dropout_rng} __snake_case : Optional[int] = self.module.init(A_ , A_)['params'] return random_params def __call__(self , __a , __a = None , ) -> List[str]: """simple docstring""" __snake_case : int = jnp.transpose(A_ , (0, 2, 3, 1)) return self.module.apply( {'params': params or self.params} , jnp.array(A_ , dtype=jnp.floataa) , rngs={} , )
700
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class a_ ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=1_8 , __a=3_0 , __a=4_0_0 , __a=True , __a=None , __a=True , __a=None , __a=True , __a=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __a=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __a=True , ) -> List[Any]: """simple docstring""" __snake_case : Tuple = size if size is not None else {'height': 2_2_4, 'width': 2_2_4} __snake_case : Any = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} __snake_case : Optional[int] = parent __snake_case : Dict = batch_size __snake_case : str = num_channels __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = min_resolution __snake_case : Tuple = max_resolution __snake_case : Optional[int] = do_resize __snake_case : Optional[int] = size __snake_case : Union[str, Any] = do_center_crop __snake_case : List[Any] = crop_size __snake_case : int = do_normalize __snake_case : Optional[Any] = image_mean __snake_case : str = image_std __snake_case : Optional[Any] = do_convert_rgb def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def SCREAMING_SNAKE_CASE__ (self , __a=False , __a=False , __a=False) -> List[str]: """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if 
equal_resolution: __snake_case : Optional[int] = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: __snake_case : Dict = [] for i in range(self.batch_size): __snake_case ,__snake_case : Optional[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __snake_case : int = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] if torchify: __snake_case : List[Any] = [torch.from_numpy(__a) for x in image_inputs] return image_inputs @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Union[str, Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=__a) @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4}) 
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8}) __snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4) self.assertEqual(image_processor.size , {'shortest_edge': 4_2}) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4}) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : int = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : List[Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input __snake_case : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : int = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input __snake_case : Any = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Union[str, Any] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class a_ ( UpperCamelCase_ , unittest.TestCase ): _snake_case = ChineseCLIPImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__a) __snake_case : List[Any] = 3 @property def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" 
__snake_case : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , 'do_resize')) self.assertTrue(hasattr(__a , 'size')) self.assertTrue(hasattr(__a , 'do_center_crop')) self.assertTrue(hasattr(__a , 'center_crop')) self.assertTrue(hasattr(__a , 'do_normalize')) self.assertTrue(hasattr(__a , 'image_mean')) self.assertTrue(hasattr(__a , 'image_std')) self.assertTrue(hasattr(__a , 'do_convert_rgb')) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input __snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __snake_case : Optional[int] = image_processing(__a , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
61
0
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = '''Hello world! cécé herlolip''' def _SCREAMING_SNAKE_CASE ( A : Optional[Any] , A : Tuple , A : str ) -> Dict: """simple docstring""" __snake_case : Optional[int] = FairseqRobertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout __snake_case : Tuple = roberta.model.encoder.sentence_encoder __snake_case : Dict = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: __snake_case : Tuple = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , __SCREAMING_SNAKE_CASE ) __snake_case : Tuple = XLMRobertaXLForSequenceClassification(__SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(__SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. 
# Embeddings __snake_case : Optional[int] = roberta_sent_encoder.embed_tokens.weight __snake_case : List[str] = roberta_sent_encoder.embed_positions.weight __snake_case : Optional[int] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. __snake_case : int = roberta_sent_encoder.layer_norm.weight __snake_case : Optional[Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __snake_case : Tuple = model.roberta.encoder.layer[i] __snake_case : Optional[int] = roberta_sent_encoder.layers[i] __snake_case : List[Any] = layer.attention __snake_case : List[str] = roberta_layer.self_attn_layer_norm.weight __snake_case : Any = roberta_layer.self_attn_layer_norm.bias # self attention __snake_case : str = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) __snake_case : Any = roberta_layer.self_attn.q_proj.weight __snake_case : List[Any] = roberta_layer.self_attn.q_proj.bias __snake_case : List[str] = roberta_layer.self_attn.k_proj.weight __snake_case : List[str] = roberta_layer.self_attn.k_proj.bias __snake_case : Optional[Any] = roberta_layer.self_attn.v_proj.weight __snake_case : List[Any] = roberta_layer.self_attn.v_proj.bias # self-attention output __snake_case : int = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape __snake_case : Optional[Any] = roberta_layer.self_attn.out_proj.weight __snake_case : Union[str, Any] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm __snake_case : List[Any] = roberta_layer.final_layer_norm.weight __snake_case : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate __snake_case : List[Any] = layer.intermediate assert 
intermediate.dense.weight.shape == roberta_layer.fca.weight.shape __snake_case : Optional[Any] = roberta_layer.fca.weight __snake_case : Any = roberta_layer.fca.bias # output __snake_case : Dict = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape __snake_case : Optional[Any] = roberta_layer.fca.weight __snake_case : str = roberta_layer.fca.bias # end of layer if classification_head: __snake_case : str = roberta.model.classification_heads['mnli'].dense.weight __snake_case : Dict = roberta.model.classification_heads['mnli'].dense.bias __snake_case : Any = roberta.model.classification_heads['mnli'].out_proj.weight __snake_case : List[Any] = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head __snake_case : Optional[Any] = roberta.model.encoder.lm_head.dense.weight __snake_case : List[Any] = roberta.model.encoder.lm_head.dense.bias __snake_case : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight __snake_case : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias __snake_case : Union[str, Any] = roberta.model.encoder.lm_head.weight __snake_case : Optional[Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. __snake_case : Union[str, Any] = roberta.encode(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 __snake_case : Optional[int] = model(__SCREAMING_SNAKE_CASE )[0] if classification_head: __snake_case : int = roberta.model.classification_heads['mnli'](roberta.extract_features(__SCREAMING_SNAKE_CASE ) ) else: __snake_case : Union[str, Any] = roberta.model(__SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) __snake_case : Tuple = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 __snake_case : int = torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) print('Do both models output the same tensors?' 
, '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(__SCREAMING_SNAKE_CASE ).mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) __A = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
701
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class a_ ( UpperCamelCase_ ): _snake_case = """vit_msn""" def __init__(self , __a=7_6_8 , __a=1_2 , __a=1_2 , __a=3_0_7_2 , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1E-06 , __a=2_2_4 , __a=1_6 , __a=3 , __a=True , **__a , ) -> Any: """simple docstring""" super().__init__(**__a) __snake_case : List[str] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : str = intermediate_size __snake_case : List[str] = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[str] = initializer_range __snake_case : Optional[int] = layer_norm_eps __snake_case : Dict = image_size __snake_case : int = patch_size __snake_case : Dict = num_channels __snake_case : Tuple = qkv_bias
61
0
'''simple docstring''' from __future__ import annotations from cmath import sqrt def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> int: """simple docstring""" if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) __snake_case : Optional[int] = b * b - 4 * a * c __snake_case : Any = (-b + sqrt(A )) / (2 * a) __snake_case : Any = (-b - sqrt(A )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def _SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" __snake_case ,__snake_case : Union[str, Any] = quadratic_roots(a=5 , b=6 , c=1 ) print(F"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
702
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : float , A : list[float] ) -> float: """simple docstring""" if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) __snake_case : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A ) ) return round(A , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> Union[str, Any]: """simple docstring""" if len(snake_case_ ) <= 1: return lst __snake_case : List[Any] = 1 while i < len(snake_case_ ): if lst[i - 1] <= lst[i]: i += 1 else: __snake_case : Tuple = lst[i], lst[i - 1] i -= 1 if i == 0: __snake_case : Union[str, Any] = 1 return lst if __name__ == "__main__": __A = input('''Enter numbers separated by a comma:\n''').strip() __A = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, 
DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int = 4_00_00_00 ) -> int: """simple docstring""" __snake_case : Any = [0, 1] __snake_case : List[Any] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 __snake_case : Optional[Any] = 0 for j in range(len(_lowerCamelCase ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f'''{solution() = }''')
704
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __A = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str: """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]: """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __snake_case : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A , id=A )
61
0
'''simple docstring''' import operator as op __A = '''scaler.pt''' __A = '''pytorch_model''' __A = '''random_states''' __A = '''optimizer''' __A = '''scheduler''' __A = '''pytorch_model.bin''' __A = '''pytorch_model.bin.index.json''' __A = '''model.safetensors''' __A = '''model.safetensors.index.json''' __A = '''1.10.2''' __A = '''py38''' __A = '''4.17.0''' __A = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge'''] __A = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2'''] __A = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP'''] __A = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH'''] __A = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT'''] __A = '''2.0.1''' __A = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich'''] __A = ['''default''', '''reduce-overhead''', '''max-autotune'''] __A = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 __A = [ '''nnodes''', '''nproc_per_node''', '''rdzv_backend''', '''rdzv_endpoint''', '''rdzv_id''', '''rdzv_conf''', '''standalone''', '''max_restarts''', '''monitor_interval''', '''start_method''', '''role''', '''module''', '''m''', '''no_python''', '''run_path''', '''log_dir''', '''r''', '''redirects''', '''t''', '''tee''', '''node_rank''', '''master_addr''', '''master_port''', ] __A = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM'''] __A = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
705
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
'''simple docstring''' import datasets from .evaluate import evaluate __A = '''\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ''' __A = ''' This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ''' __A = ''' Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': the text of the answer references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the SQuAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. 
Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}] >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}] >>> squad_metric = datasets.load_metric("squad") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')}, 'references': { 'id': datasets.Value('string'), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string'), 'answer_start': datasets.Value('int32'), }), }, }) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Union[str, Any]: """simple docstring""" __snake_case : int = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} __snake_case : Tuple = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] __snake_case : int = evaluate(dataset=a_ , predictions=a_) return score
706
'''simple docstring''' from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _SCREAMING_SNAKE_CASE ( A : Optional[Any] ) -> int: """simple docstring""" if not is_accelerate_available(): return method __snake_case : Optional[Any] = version.parse(accelerate.__version__ ).base_version if version.parse(A ) < version.parse('0.17.0' ): return method def wrapper(self : Optional[Any] , *A : Optional[Any] , **A : Optional[int] ): if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ): self._hf_hook.pre_forward(self ) return method(self , *A , **A ) return wrapper
61
0
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""input_features""", """attention_mask"""] def __init__(self , __a=8_0 , __a=1_6_0_0_0 , __a=0.0 , __a=1_0 , __a=2_5 , __a="hamming_window" , __a=3_2_7_6_8.0 , __a=0.97 , __a=1.0 , __a=True , __a=True , __a=False , **__a , ) -> Tuple: """simple docstring""" super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase) __snake_case : Dict = feature_size __snake_case : str = sampling_rate __snake_case : Dict = padding_value __snake_case : List[str] = hop_length __snake_case : Dict = win_length __snake_case : Optional[Any] = frame_signal_scale __snake_case : List[Any] = preemphasis_coeff __snake_case : Any = mel_floor __snake_case : Dict = normalize_means __snake_case : str = normalize_vars __snake_case : Dict = win_function __snake_case : List[str] = return_attention_mask __snake_case : int = win_length * sampling_rate // 1_0_0_0 __snake_case : int = hop_length * sampling_rate // 1_0_0_0 __snake_case : Optional[Any] = optimal_fft_length(self.sample_size) __snake_case : List[Any] = (self.n_fft // 2) + 1 def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" if self.win_function == "hamming_window": __snake_case : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase) else: __snake_case : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function) __snake_case : Tuple = mel_filter_bank( num_frequency_bins=self.n_freqs , 
num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) __snake_case : List[str] = spectrogram( one_waveform * self.frame_signal_scale , window=__lowerCAmelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__lowerCAmelCase , preemphasis=self.preemphasis_coeff , mel_filters=__lowerCAmelCase , mel_floor=self.mel_floor , log_mel='log' , ) return msfc_features.T def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> List[Any]: """simple docstring""" if self.normalize_means: __snake_case : Any = x[:input_length].mean(axis=0) __snake_case : List[Any] = np.subtract(__lowerCAmelCase , __lowerCAmelCase) if self.normalize_vars: __snake_case : Tuple = x[:input_length].std(axis=0) __snake_case : str = np.divide(__lowerCAmelCase , __lowerCAmelCase) if input_length < x.shape[0]: __snake_case : int = padding_value # make sure array is in float32 __snake_case : Optional[Any] = x.astype(np.floataa) return x def SCREAMING_SNAKE_CASE__ (self , __a , __a = None) -> Any: """simple docstring""" __snake_case : Tuple = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__lowerCAmelCase , __lowerCAmelCase , self.padding_value) for x, n in zip(__lowerCAmelCase , __lowerCAmelCase)] def __call__(self , __a , __a = False , __a = None , __a = False , __a = None , __a = None , __a = None , __a = None , **__a , ) -> List[Any]: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""") else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. 
' 'Failing to do so can result in silent errors that might be hard to debug.') __snake_case : Tuple = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""") __snake_case : str = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __snake_case : Tuple = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray): __snake_case : Tuple = np.asarray(__lowerCAmelCase , dtype=np.floataa) elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __snake_case : int = raw_speech.astype(np.floataa) # always return batch if not is_batched: __snake_case : Union[str, Any] = [raw_speech] # extract fbank features __snake_case : List[str] = [self._extract_mfsc_features(__lowerCAmelCase) for one_waveform in raw_speech] # convert into correct format for padding __snake_case : List[Any] = BatchFeature({'input_features': features}) __snake_case : Tuple = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) # make sure list is in array format __snake_case : Union[str, Any] = padded_inputs.get('input_features') if isinstance(input_features[0] , __lowerCAmelCase): __snake_case : Dict = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features] __snake_case : Any = padded_inputs.get('attention_mask') if attention_mask is not None: __snake_case : Any = [np.asarray(__lowerCAmelCase , dtype=np.intaa) for array in attention_mask] if self.normalize_means or self.normalize_vars: __snake_case : List[str] = ( np.array(__lowerCAmelCase , dtype=np.intaa) if 
self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase) is not PaddingStrategy.DO_NOT_PAD and padding else None ) __snake_case : List[str] = self.normalize( padded_inputs['input_features'] , attention_mask=__lowerCAmelCase) if return_tensors is not None: __snake_case : Dict = padded_inputs.convert_to_tensors(__lowerCAmelCase) return padded_inputs
707
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class a_ ( unittest.TestCase , UpperCamelCase_ ): def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[str] = load_tool('text-to-speech') self.tool.setup() def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Dict = self.tool('hey') __snake_case : List[Any] = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = self.tool('hey') __snake_case : Any = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
61
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> int: """simple docstring""" return int((input_a, input_a).count(0 ) == 0 ) def _SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
708
'''simple docstring''' import math class a_ : def __init__(self , __a=0) -> Any: # a graph with Node 0,1,...,N-1 """simple docstring""" __snake_case : List[str] = n __snake_case : Tuple = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # adjacency matrix for weight __snake_case : Union[str, Any] = [ [math.inf for j in range(0 , __a)] for i in range(0 , __a) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Union[str, Any] = w def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): __snake_case : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> Optional[int]: """simple docstring""" return self.dp[u][v] if __name__ == "__main__": __A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
61
0
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging __A = logging.get_logger(__name__) # pylint: disable=invalid-name class a_ ( a__ ): def __init__(self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> int: super().__init__() if safety_checker is None: logger.warning( F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. 
For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( speech_model=lowerCAmelCase__ , speech_processor=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , ) def SCREAMING_SNAKE_CASE__ (self , __a = "auto") -> int: if slice_size == "auto": __snake_case : Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: self.enable_attention_slicing(lowerCAmelCase__) @torch.no_grad() def __call__(self , __a , __a=1_6_0_0_0 , __a = 5_1_2 , __a = 5_1_2 , __a = 5_0 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ) -> Optional[Any]: __snake_case : Optional[int] = self.speech_processor.feature_extractor( lowerCAmelCase__ , return_tensors='pt' , sampling_rate=lowerCAmelCase__).input_features.to(self.device) __snake_case : Optional[Any] = self.speech_model.generate(lowerCAmelCase__ , max_length=4_8_0_0_0_0) __snake_case : Tuple = self.speech_processor.tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , normalize=lowerCAmelCase__)[ 0 ] if isinstance(lowerCAmelCase__ , lowerCAmelCase__): __snake_case : Optional[Any] = 1 elif isinstance(lowerCAmelCase__ , lowerCAmelCase__): __snake_case : Any = len(lowerCAmelCase__) else: raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__)}""") if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is 
{callback_steps} of type""" F""" {type(lowerCAmelCase__)}.""") # get prompt text embeddings __snake_case : Union[str, Any] = self.tokenizer( lowerCAmelCase__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) __snake_case : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F""" {self.tokenizer.model_max_length} tokens: {removed_text}""") __snake_case : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case : int = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case : Dict = text_embeddings.shape __snake_case : Any = text_embeddings.repeat(1 , lowerCAmelCase__ , 1) __snake_case : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase__ , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__snake_case : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case : List[str] if negative_prompt is None: __snake_case : Any = [""] * batch_size elif type(lowerCAmelCase__) is not type(lowerCAmelCase__): raise TypeError( F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__)} !=""" F""" {type(lowerCAmelCase__)}.""") elif isinstance(lowerCAmelCase__ , lowerCAmelCase__): __snake_case : Optional[Any] = [negative_prompt] elif batch_size != len(lowerCAmelCase__): raise ValueError( F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__)}, but `prompt`:""" F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ' the batch size of `prompt`.') else: __snake_case : Optional[int] = negative_prompt __snake_case : Dict = text_input_ids.shape[-1] __snake_case : Any = self.tokenizer( lowerCAmelCase__ , padding='max_length' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' , ) __snake_case : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case : int = uncond_embeddings.shape[1] __snake_case : int = uncond_embeddings.repeat(1 , lowerCAmelCase__ , 1) __snake_case : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case : Optional[int] = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. 
# However this currently doesn't work in `mps`. __snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case : Tuple = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case : Union[str, Any] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device='cpu' , dtype=lowerCAmelCase__).to( self.device) else: __snake_case : Optional[Any] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""") __snake_case : str = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(lowerCAmelCase__) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case : Optional[Any] = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler __snake_case : List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case : List[Any] = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) __snake_case : Union[str, Any] = {} if accepts_eta: __snake_case : Any = eta for i, t in enumerate(self.progress_bar(lowerCAmelCase__)): # expand the latents if we are doing classifier free guidance __snake_case : int = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __snake_case : List[str] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__) # predict the noise residual __snake_case : Optional[int] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__).sample # perform guidance if do_classifier_free_guidance: __snake_case : Union[str, Any] = noise_pred.chunk(2) __snake_case : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case : List[str] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) __snake_case : Optional[Any] = 1 / 0.18_215 * latents __snake_case : Union[str, Any] = self.vae.decode(lowerCAmelCase__).sample __snake_case : int = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __snake_case : Optional[int] = self.numpy_to_pil(lowerCAmelCase__) if not return_dict: return image return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__)
709
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class a_ ( UpperCamelCase_ ): _snake_case = ["""pixel_values"""] def __init__(self , __a = True , __a = None , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 2_5_5 , __a = True , __a = None , __a = None , **__a , ) -> None: """simple docstring""" super().__init__(**__a) __snake_case : Tuple = size if size is not None else {'shortest_edge': 3_8_4} __snake_case : List[Any] = get_size_dict(__a , default_to_square=__a) __snake_case : int = do_resize __snake_case : List[str] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case : Tuple = resample __snake_case : Dict = do_rescale __snake_case : Any = rescale_factor __snake_case : str = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: """simple docstring""" __snake_case : Dict = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""") __snake_case : List[str] = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : Any = int(shortest_edge / crop_pct) __snake_case : Any = get_resize_output_image_size(__a , size=__a , default_to_square=__a) __snake_case : int = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a) else: # warping (no cropping) when evaluated at 384 or larger return resize( __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a = None , **__a , ) -> Any: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def SCREAMING_SNAKE_CASE__ (self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image: """simple docstring""" __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Dict = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[int] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : 
List[str] = size if size is not None else self.size __snake_case : Any = get_size_dict(__a , default_to_square=__a) __snake_case : Dict = make_list_of_images(__a) if not valid_images(__a): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case : Tuple = [to_numpy_array(__a) for image in images] if do_resize: __snake_case : Optional[int] = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: __snake_case : Any = [self.normalize(image=__a , mean=__a , std=__a) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a) for image in images] __snake_case : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a)
61
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): _snake_case = StableDiffusionPanoramaPipeline _snake_case = TEXT_TO_IMAGE_PARAMS _snake_case = TEXT_TO_IMAGE_BATCH_PARAMS _snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS _snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" torch.manual_seed(0) __snake_case : Any = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) __snake_case : Optional[Any] = DDIMScheduler() torch.manual_seed(0) __snake_case : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) __snake_case : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) __snake_case : Optional[int] = CLIPTextModel(__lowerCAmelCase) __snake_case : Tuple = 
CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') __snake_case : Dict = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE__ (self , __a , __a=0) -> Optional[int]: """simple docstring""" __snake_case : int = torch.manual_seed(__lowerCAmelCase) __snake_case : Tuple = { 'prompt': 'a photo of the dolomites', 'generator': generator, # Setting height and width to None to prevent OOMs on CPU. 'height': None, 'width': None, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case : Tuple = self.get_dummy_components() __snake_case : str = StableDiffusionPanoramaPipeline(**__lowerCAmelCase) __snake_case : int = sd_pipe.to(__lowerCAmelCase) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase) __snake_case : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase) __snake_case : Any = sd_pipe(**__lowerCAmelCase).images __snake_case : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __snake_case : Tuple = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2]) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case : List[Any] = self.get_dummy_components() __snake_case : 
Tuple = StableDiffusionPanoramaPipeline(**__lowerCAmelCase) __snake_case : List[Any] = sd_pipe.to(__lowerCAmelCase) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase) __snake_case : Union[str, Any] = self.get_dummy_inputs(__lowerCAmelCase) __snake_case : Optional[Any] = 'french fries' __snake_case : Dict = sd_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase) __snake_case : Any = output.images __snake_case : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __snake_case : str = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case : Tuple = self.get_dummy_components() __snake_case : Union[str, Any] = StableDiffusionPanoramaPipeline(**__lowerCAmelCase) __snake_case : Optional[Any] = sd_pipe.to(__lowerCAmelCase) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase) __snake_case : List[Any] = self.get_dummy_inputs(__lowerCAmelCase) __snake_case : Union[str, Any] = sd_pipe(**__lowerCAmelCase , view_batch_size=2) __snake_case : str = output.images __snake_case : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __snake_case : Tuple = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case : List[Any] = self.get_dummy_components() __snake_case : str = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear') __snake_case : Any = StableDiffusionPanoramaPipeline(**__lowerCAmelCase) __snake_case : Union[str, 
Any] = sd_pipe.to(__lowerCAmelCase) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase) __snake_case : List[Any] = self.get_dummy_inputs(__lowerCAmelCase) __snake_case : Union[str, Any] = sd_pipe(**__lowerCAmelCase).images __snake_case : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __snake_case : Optional[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case : List[Any] = self.get_dummy_components() __snake_case : str = PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=__lowerCAmelCase) __snake_case : str = StableDiffusionPanoramaPipeline(**__lowerCAmelCase) __snake_case : List[Any] = sd_pipe.to(__lowerCAmelCase) sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase) __snake_case : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase) __snake_case : int = sd_pipe(**__lowerCAmelCase).images __snake_case : int = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __snake_case : Dict = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch_gpu class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ (self , __a=0) -> Tuple: """simple docstring""" __snake_case : List[str] = torch.manual_seed(__lowerCAmelCase) __snake_case : int = { 'prompt': 'a photo of the dolomites', 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: 
"""simple docstring""" __snake_case : Optional[Any] = 'stabilityai/stable-diffusion-2-base' __snake_case : str = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder='scheduler') __snake_case : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase) pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) pipe.enable_attention_slicing() __snake_case : Any = self.get_inputs() __snake_case : Any = pipe(**__lowerCAmelCase).images __snake_case : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) __snake_case : Optional[Any] = np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ]) assert np.abs(expected_slice - image_slice).max() < 1E-2 def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : int = StableDiffusionPanoramaPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-base' , safety_checker=__lowerCAmelCase) __snake_case : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) pipe.enable_attention_slicing() __snake_case : Dict = self.get_inputs() __snake_case : Dict = pipe(**__lowerCAmelCase).images __snake_case : str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) __snake_case : Optional[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ]) assert np.abs(expected_slice - image_slice).max() < 1E-3 def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : List[Any] = 0 def callback_fn(__a , __a , __a) -> None: __snake_case : str = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __snake_case : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) __snake_case : List[Any] = latents[0, -3:, 
-3:, -1] __snake_case : Tuple = np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2 elif step == 2: __snake_case : int = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) __snake_case : Any = latents[0, -3:, -3:, -1] __snake_case : Any = np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2 __snake_case : Dict = False __snake_case : Tuple = 'stabilityai/stable-diffusion-2-base' __snake_case : Union[str, Any] = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder='scheduler') __snake_case : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase) __snake_case : Union[str, Any] = pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) pipe.enable_attention_slicing() __snake_case : List[Any] = self.get_inputs() pipe(**__lowerCAmelCase , callback=__lowerCAmelCase , callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __snake_case : Tuple = 'stabilityai/stable-diffusion-2-base' __snake_case : int = DDIMScheduler.from_pretrained(__lowerCAmelCase , subfolder='scheduler') __snake_case : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase) __snake_case : List[str] = pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() __snake_case : str = 
self.get_inputs() __snake_case : Dict = pipe(**__lowerCAmelCase) __snake_case : int = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 1_0**9
710
'''simple docstring''' from functools import lru_cache @lru_cache def _SCREAMING_SNAKE_CASE ( A : int ) -> int: """simple docstring""" if num < 0: raise ValueError('Number should not be negative.' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
61
0
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a_ ( __UpperCAmelCase ): def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'width_multiplier')) class a_ : def __init__(self , __a , __a=1_3 , __a=6_4 , __a=2 , __a=3 , __a="swish" , __a=3 , __a=3_2 , __a=0.1 , __a=0.02 , __a=True , __a=True , __a=1_0 , __a=None , __a=0.25 , __a=0.0 , __a=0.0 , ) -> Tuple: """simple docstring""" __snake_case : Optional[int] = parent __snake_case : List[str] = batch_size __snake_case : Tuple = image_size __snake_case : Tuple = patch_size __snake_case : Optional[int] = num_channels __snake_case : Optional[Any] = make_divisible(5_1_2 * width_multiplier , divisor=8) __snake_case : Union[str, Any] = hidden_act __snake_case : Any = conv_kernel_size __snake_case : int = output_stride __snake_case : List[Any] = classifier_dropout_prob __snake_case : Any = use_labels __snake_case : int = is_training __snake_case : List[str] = num_labels __snake_case : int = initializer_range __snake_case : Optional[int] = scope __snake_case : List[str] = 
width_multiplier __snake_case : Any = ffn_dropout __snake_case : List[str] = attn_dropout def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None __snake_case : Dict = None if self.use_labels: __snake_case : int = ids_tensor([self.batch_size] , self.num_labels) __snake_case : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) __snake_case : Any = self.get_config() return config, pixel_values, labels, pixel_labels def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a) -> str: """simple docstring""" __snake_case : int = MobileViTVaModel(config=UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() __snake_case : List[str] = model(UpperCAmelCase_) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , __a) -> str: """simple docstring""" __snake_case : Union[str, Any] = self.num_labels __snake_case : int = MobileViTVaForImageClassification(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() __snake_case : Dict = model(UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a 
, __a) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = self.num_labels __snake_case : Optional[int] = MobileViTVaForSemanticSegmentation(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() __snake_case : List[Any] = model(UpperCAmelCase_) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __snake_case : Dict = model(UpperCAmelCase_ , labels=UpperCAmelCase_) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : Tuple = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case ,__snake_case : Optional[Any] = config_and_inputs __snake_case : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): _snake_case = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) _snake_case = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Any = MobileViTVaModelTester(self) __snake_case : List[Any] = MobileViTVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass 
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" pass @unittest.skip(reason='MobileViTV2 does not output attentions') def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[Any] = model_class(UpperCAmelCase_) __snake_case : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Tuple = [*signature.parameters.keys()] __snake_case : str = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase_) def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : Union[str, Any] = model_class(UpperCAmelCase_) model.to(UpperCAmelCase_) model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)) __snake_case : Tuple = outputs.hidden_states __snake_case : List[Any] = 5 self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being 
successively divided by 2. __snake_case : List[Any] = 2 for i in range(len(UpperCAmelCase_)): self.assertListEqual( list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2) __snake_case ,__snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : List[Any] = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_) @slow def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = MobileViTVaModel.from_pretrained(UpperCAmelCase_) self.assertIsNotNone(UpperCAmelCase_) def _SCREAMING_SNAKE_CASE ( ) -> Tuple: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple 
docstring""" __snake_case : int = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to( UpperCAmelCase_) __snake_case : List[Any] = self.default_image_processor __snake_case : Any = prepare_img() __snake_case : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): __snake_case : Dict = model(**UpperCAmelCase_) # verify the logits __snake_case : Union[str, Any] = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , UpperCAmelCase_) __snake_case : Any = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01]).to(UpperCAmelCase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3') __snake_case : str = model.to(UpperCAmelCase_) __snake_case : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3') __snake_case : str = prepare_img() __snake_case : Any = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): __snake_case : Any = model(**UpperCAmelCase_) __snake_case : Optional[Any] = outputs.logits # verify the logits __snake_case : Optional[Any] = torch.Size((1, 2_1, 3_2, 3_2)) self.assertEqual(logits.shape , UpperCAmelCase_) __snake_case : Dict = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCAmelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple 
docstring""" __snake_case : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3') __snake_case : str = model.to(UpperCAmelCase_) __snake_case : Union[str, Any] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3') __snake_case : Any = prepare_img() __snake_case : Any = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_) # forward pass with torch.no_grad(): __snake_case : str = model(**UpperCAmelCase_) __snake_case : Dict = outputs.logits.detach().cpu() __snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ , target_sizes=[(5_0, 6_0)]) __snake_case : Any = torch.Size((5_0, 6_0)) self.assertEqual(segmentation[0].shape , UpperCAmelCase_) __snake_case : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_) __snake_case : int = torch.Size((3_2, 3_2)) self.assertEqual(segmentation[0].shape , UpperCAmelCase_)
711
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = VQModel _snake_case = """sample""" @property def SCREAMING_SNAKE_CASE__ (self , __a=(3_2, 3_2)) -> str: """simple docstring""" __snake_case : Dict = 4 __snake_case : Optional[int] = 3 __snake_case : str = floats_tensor((batch_size, num_channels) + sizes).to(__a) return {"sample": image} @property def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Optional[Any] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } __snake_case : List[Any] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case ,__snake_case : List[Any] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=__a) self.assertIsNotNone(__a) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(__a) __snake_case : Any = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(__a).eval() 
torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) __snake_case : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) __snake_case : Optional[int] = image.to(__a) with torch.no_grad(): __snake_case : List[Any] = model(__a).sample __snake_case : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off __snake_case : int = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143]) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=1E-3))
61
0
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __A = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase_ ) class a_ ( UpperCamelCase_ ): def __init__(self , **__a) -> Any: """simple docstring""" super().__init__(**UpperCamelCase__) requires_backends(self , 'vision') self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING) def __call__(self , __a , **__a) -> Dict: """simple docstring""" return super().__call__(UpperCamelCase__ , **UpperCamelCase__) def SCREAMING_SNAKE_CASE__ (self , **__a) -> List[Any]: """simple docstring""" __snake_case : Dict = {} if "candidate_labels" in kwargs: __snake_case : Dict = kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: __snake_case : int = kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def SCREAMING_SNAKE_CASE__ (self , __a , __a=None , __a="This is a photo of {}.") -> Dict: """simple docstring""" __snake_case : List[str] = load_image(UpperCamelCase__) __snake_case : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework) __snake_case : Optional[Any] = candidate_labels __snake_case : List[Any] = [hypothesis_template.format(UpperCamelCase__) for x in candidate_labels] __snake_case : Dict = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework , padding=UpperCamelCase__) __snake_case : 
Optional[int] = [text_inputs] return inputs def SCREAMING_SNAKE_CASE__ (self , __a) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = model_inputs.pop('candidate_labels') __snake_case : Optional[int] = model_inputs.pop('text_inputs') if isinstance(text_inputs[0] , UpperCamelCase__): __snake_case : Union[str, Any] = text_inputs[0] else: # Batching case. __snake_case : int = text_inputs[0][0] __snake_case : Any = self.model(**UpperCamelCase__ , **UpperCamelCase__) __snake_case : Dict = { '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_image, } return model_outputs def SCREAMING_SNAKE_CASE__ (self , __a) -> int: """simple docstring""" __snake_case : str = model_outputs.pop('candidate_labels') __snake_case : str = model_outputs['''logits'''][0] if self.framework == "pt": __snake_case : Optional[int] = logits.softmax(dim=-1).squeeze(-1) __snake_case : List[str] = probs.tolist() if not isinstance(UpperCamelCase__ , UpperCamelCase__): __snake_case : str = [scores] elif self.framework == "tf": __snake_case : Optional[int] = stable_softmax(UpperCamelCase__ , axis=-1) __snake_case : List[Any] = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""") __snake_case : Any = [ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase__ , UpperCamelCase__) , key=lambda __a: -x[0]) ] return result
712
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __A = logging.getLogger(__name__) @dataclass class a_ : _snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class a_ : _snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."""} , ) _snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case ,__snake_case ,__snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case ,__snake_case ,__snake_case : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' ) __snake_case : List[str] = import_module('tasks' ) try: __snake_case : Any = getattr(A , model_args.task_type ) __snake_case : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , A ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case : Optional[Any] = token_classification_task.get_labels(data_args.labels ) __snake_case : Dict[int, str] = dict(enumerate(A ) ) __snake_case : Optional[Any] = len(A ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , idalabel=A , labelaid={label: i for i, label in enumerate(A )} , cache_dir=model_args.cache_dir , ) __snake_case : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case : List[Any] = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case : int = ( TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(A : np.ndarray , A : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case : str = np.argmax(A , axis=2 ) __snake_case ,__snake_case : int = preds.shape __snake_case : Dict = [[] for _ in range(A )] __snake_case : Union[str, Any] = [[] for _ in range(A )] for i in range(A ): for j in range(A ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(A : EvalPrediction ) -> Dict: __snake_case ,__snake_case : Any = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": 
accuracy_score(A , A ), "precision": precision_score(A , A ), "recall": recall_score(A , A ), "f1": fa_score(A , A ), } # Data collator __snake_case : Optional[int] = DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case : Optional[Any] = Trainer( model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : List[str] = trainer.evaluate() __snake_case : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) results.update(A ) # Predict if training_args.do_predict: __snake_case : str = TokenClassificationDataset( token_classification_task=A , data_dir=data_args.data_dir , tokenizer=A , labels=A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case ,__snake_case ,__snake_case : str = trainer.predict(A ) __snake_case ,__snake_case : List[str] = align_predictions(A , A ) __snake_case : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , A , A ) writer.write('%s = %s\n' % (key, value) ) 
# Save predictions __snake_case : List[str] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(A , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(A , A , A ) return results def _SCREAMING_SNAKE_CASE ( A : int ) -> Any: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
61
0
'''simple docstring''' import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def _SCREAMING_SNAKE_CASE ( A : Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" if subparsers is not None: __snake_case : List[Any] = subparsers.add_parser('env' ) else: __snake_case : Optional[Any] = argparse.ArgumentParser('Accelerate env command' ) parser.add_argument( '--config_file' , default=__UpperCamelCase , help='The config file to use for the default values in the launching script.' ) if subparsers is not None: parser.set_defaults(func=__UpperCamelCase ) return parser def _SCREAMING_SNAKE_CASE ( A : Union[str, Any] ) -> List[str]: """simple docstring""" __snake_case : List[Any] = torch.__version__ __snake_case : int = torch.cuda.is_available() __snake_case : Any = is_xpu_available() __snake_case : Any = is_npu_available() __snake_case : Optional[int] = 'Not found' # Get the default from the config file. 
if args.config_file is not None or os.path.isfile(__UpperCamelCase ): __snake_case : Optional[Any] = load_config_from_file(args.config_file ).to_dict() __snake_case : List[Any] = { '`Accelerate` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Numpy version': np.__version__, 'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""", 'PyTorch XPU available': str(__UpperCamelCase ), 'PyTorch NPU available': str(__UpperCamelCase ), 'System RAM': F"""{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB""", } if pt_cuda_available: __snake_case : Optional[Any] = torch.cuda.get_device_name() print('\nCopy-and-paste the text below in your GitHub issue\n' ) print('\n'.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) ) print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' ) __snake_case : List[Any] = ( '\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else F"""\t{accelerate_config}""" ) print(__UpperCamelCase ) __snake_case : int = accelerate_config return info def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Optional[int] = env_command_parser() __snake_case : Tuple = parser.parse_args() env_command(__UpperCamelCase ) return 0 if __name__ == "__main__": raise SystemExit(main())
713
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( A : list ) -> list: """simple docstring""" __snake_case : Tuple = False while is_sorted is False: # Until all the indices are traversed keep looping __snake_case : Optional[Any] = True for i in range(0 , len(A ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : int = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : List[Any] = False for i in range(1 , len(A ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __snake_case ,__snake_case : Tuple = input_list[i + 1], input_list[i] # swapping if elements not in order __snake_case : Any = False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') __A = [int(x) for x in input().split()] # inputing elements of the list in one line __A = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
61
0
'''simple docstring''' import baseaa def _SCREAMING_SNAKE_CASE ( A : str ) -> int: return baseaa.baaencode(string.encode('utf-8' ) ) def _SCREAMING_SNAKE_CASE ( A : List[Any] ) -> List[str]: return baseaa.baadecode(A ).decode('utf-8' ) if __name__ == "__main__": __A = '''Hello World!''' __A = baseaa_encode(test) print(encoded) __A = baseaa_decode(encoded) print(decoded)
714
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger() def _SCREAMING_SNAKE_CASE ( A : int , A : str , A : LevitConfig , A : Path , A : bool = True ) -> Dict: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": __snake_case : Optional[int] = timm.create_model('levit_128s' , pretrained=A ) else: __snake_case : Tuple = timm.create_model('levit_128' , pretrained=A ) if hidden_sizes == 1_92: __snake_case : int = timm.create_model('levit_192' , pretrained=A ) if hidden_sizes == 2_56: __snake_case : List[Any] = timm.create_model('levit_256' , pretrained=A ) if hidden_sizes == 3_84: __snake_case : int = timm.create_model('levit_384' , pretrained=A ) from_model.eval() __snake_case : str = LevitForImageClassificationWithTeacher(A ).eval() __snake_case : int = OrderedDict() __snake_case : Optional[Any] = from_model.state_dict() __snake_case : Tuple = list(from_model.state_dict().keys() ) __snake_case : List[str] = list(our_model.state_dict().keys() ) print(len(A ) , len(A ) ) for i in range(len(A ) ): __snake_case : Optional[int] = weights[og_keys[i]] our_model.load_state_dict(A ) __snake_case : Tuple = torch.randn((2, 3, 2_24, 2_24) ) __snake_case : Union[str, Any] = from_model(A ) __snake_case : List[str] = our_model(A ).logits assert torch.allclose(A , A ), "The model logits don't match the original one." 
__snake_case : int = name print(A ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __snake_case : int = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _SCREAMING_SNAKE_CASE ( A : Path , A : str = None , A : bool = True ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = 'imagenet-1k-id2label.json' __snake_case : Tuple = 10_00 __snake_case : Dict = (1, num_labels) __snake_case : List[str] = 'huggingface/label-files' __snake_case : Any = num_labels __snake_case : str = json.load(open(hf_hub_download(A , A , repo_type='dataset' ) , 'r' ) ) __snake_case : Any = {int(A ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()} __snake_case : Optional[int] = partial(A , num_labels=A , idalabel=A , labelaid=A ) __snake_case : Dict = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } __snake_case : Union[str, Any] = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( 
names_to_hidden_sizes[model_name] , A , names_to_config[model_name] , A , A ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , A , A , A , A ) return config, expected_shape if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __A = parser.parse_args() __A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
61
0
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _SCREAMING_SNAKE_CASE ( A : str ) -> List[str]: """simple docstring""" __snake_case : Dict = [] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for v in tree.values(): shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) ) elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(SCREAMING_SNAKE_CASE_ ) ) elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('Not supported' ) return shapes @torch.jit.ignore def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : List[Any] ) -> Any: """simple docstring""" __snake_case : Optional[Any] = [] for d in reversed(SCREAMING_SNAKE_CASE_ ): idx.append(flat_idx % d ) __snake_case : List[Any] = flat_idx // d return tuple(reversed(SCREAMING_SNAKE_CASE_ ) ) @torch.jit.ignore def _SCREAMING_SNAKE_CASE ( A : Dict , A : Union[str, Any] , A : Optional[Any] , A : int = None , A : Optional[int] = None , ) -> int: """simple docstring""" # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(A : Optional[int] ) -> None: __snake_case : Any = True for i in range(len(SCREAMING_SNAKE_CASE_ ) ): __snake_case : Optional[int] = -1 * (i + 1) l[reversed_idx] &= tally __snake_case : List[Any] = l[reversed_idx] if start_edges is None: __snake_case : Optional[Any] = [s == 0 for s in start] reduce_edge_list(SCREAMING_SNAKE_CASE_ ) if end_edges is None: __snake_case : Optional[int] = [e == (d - 1) for e, d in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] reduce_edge_list(SCREAMING_SNAKE_CASE_ ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(SCREAMING_SNAKE_CASE_ ) == 0: return [()] elif len(SCREAMING_SNAKE_CASE_ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] __snake_case : Optional[int] = [] __snake_case : List[str] = [] # Dimensions common to start and end can be selected directly for s, e in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if s == e: path_list.append(slice(SCREAMING_SNAKE_CASE_ , s + 1 ) ) else: break __snake_case : Optional[int] = tuple(SCREAMING_SNAKE_CASE_ ) __snake_case : Dict = len(SCREAMING_SNAKE_CASE_ ) # start == end, and we're done if divergence_idx == len(SCREAMING_SNAKE_CASE_ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __snake_case : List[Any] = start[divergence_idx] return tuple( path + (slice(SCREAMING_SNAKE_CASE_ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None __snake_case : Tuple = end[divergence_idx] return tuple( path + (slice(SCREAMING_SNAKE_CASE_ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating 
only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) __snake_case : Optional[Any] = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def _SCREAMING_SNAKE_CASE ( A : Dict , A : Tuple , A : Tuple , A : int ) -> Any: """simple docstring""" __snake_case : str = t.shape[:no_batch_dims] __snake_case : str = list(_flat_idx_to_idx(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # _get_minimal_slice_set is inclusive __snake_case : List[Any] = list(_flat_idx_to_idx(flat_end - 1 , SCREAMING_SNAKE_CASE_ ) ) # Get an ordered list of slices to perform __snake_case : List[str] = _get_minimal_slice_set( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) __snake_case : Optional[int] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def _SCREAMING_SNAKE_CASE ( A : Tuple , A : Any , A : Union[str, Any] , A : List[str] , A : Dict = False , A : int = None , A : Optional[Any] = False , ) -> str: """simple docstring""" if not (len(SCREAMING_SNAKE_CASE_ ) > 0): raise ValueError('Must provide at least one input' ) __snake_case : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(SCREAMING_SNAKE_CASE_ )] __snake_case : Any = tuple([max(SCREAMING_SNAKE_CASE_ ) for s in zip(*SCREAMING_SNAKE_CASE_ )] ) def _prep_inputs(A : 
Union[str, Any] ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: __snake_case : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) __snake_case : Any = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: __snake_case : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t __snake_case : Union[str, Any] = tensor_tree_map(_prep_inputs , SCREAMING_SNAKE_CASE_ ) __snake_case : Tuple = None if _out is not None: __snake_case : Union[str, Any] = tensor_tree_map(lambda A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) __snake_case : Dict = 1 for d in orig_batch_dims: flat_batch_dim *= d __snake_case : Union[str, Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(A : List[str] ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t __snake_case : Any = 0 __snake_case : List[str] = prepped_outputs for _ in range(SCREAMING_SNAKE_CASE_ ): # Chunk the input if not low_mem: __snake_case : Union[str, Any] = _select_chunk else: __snake_case : Union[str, Any] = partial( _chunk_slice , flat_start=SCREAMING_SNAKE_CASE_ , flat_end=min(SCREAMING_SNAKE_CASE_ , i + chunk_size ) , no_batch_dims=len(SCREAMING_SNAKE_CASE_ ) , ) __snake_case : List[Any] = tensor_tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Run the layer on the chunk __snake_case : Tuple = layer(**SCREAMING_SNAKE_CASE_ ) # Allocate space for the output if out is None: __snake_case : Optional[int] = tensor_tree_map(lambda A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ ) # Put the chunk in its pre-allocated space if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): def assign(A : int , A : int ) -> None: for k, v in da.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): assign(SCREAMING_SNAKE_CASE_ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: __snake_case : List[str] = da[k] 
assign(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for xa, xa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if _add_into_out: xa[i : i + chunk_size] += xa else: __snake_case : Union[str, Any] = xa elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: __snake_case : Tuple = output_chunk else: raise ValueError('Not supported' ) i += chunk_size __snake_case : str = tensor_tree_map(lambda A : t.view(orig_batch_dims + t.shape[1:] ) , SCREAMING_SNAKE_CASE_ ) return out class a_ : def __init__(self , __a = 5_1_2 , ) -> List[str]: """simple docstring""" __snake_case : int = max_chunk_size __snake_case : Union[str, Any] = None __snake_case : Dict = None def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: """simple docstring""" logging.info('Tuning chunk size...') if min_chunk_size >= self.max_chunk_size: return min_chunk_size __snake_case : Union[str, Any] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)] __snake_case : Optional[Any] = [c for c in candidates if c > min_chunk_size] __snake_case : Optional[int] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(__a) -> bool: try: with torch.no_grad(): fn(*__lowercase , chunk_size=__lowercase) return True except RuntimeError: return False __snake_case : Tuple = 0 __snake_case : int = len(__lowercase) - 1 while i > min_viable_chunk_size_index: __snake_case : Dict = test_chunk_size(candidates[i]) if not viable: __snake_case : Optional[Any] = (min_viable_chunk_size_index + i) // 2 else: __snake_case : int = i __snake_case : Optional[Any] = (i + len(__lowercase) - 1) // 2 return candidates[min_viable_chunk_size_index] def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> int: """simple docstring""" __snake_case : List[Any] = True for aa, aa in zip(__lowercase , __lowercase): assert type(__lowercase) == type(__lowercase) if 
isinstance(__lowercase , (list, tuple)): consistent &= self._compare_arg_caches(__lowercase , __lowercase) elif isinstance(__lowercase , __lowercase): __snake_case : Any = [v for _, v in sorted(aa.items() , key=lambda __a: x[0])] __snake_case : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda __a: x[0])] consistent &= self._compare_arg_caches(__lowercase , __lowercase) else: consistent &= aa == aa return consistent def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a , ) -> int: """simple docstring""" __snake_case : List[str] = True __snake_case : Dict = tree_map(lambda __a: a.shape if isinstance(__lowercase , torch.Tensor) else a , __lowercase , __lowercase) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data) == len(__lowercase) __snake_case : List[Any] = self._compare_arg_caches(self.cached_arg_data , __lowercase) else: # Otherwise, we can reuse the precomputed value __snake_case : Optional[Any] = False if not consistent: __snake_case : Dict = self._determine_favorable_chunk_size( __lowercase , __lowercase , __lowercase , ) __snake_case : int = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
715
'''simple docstring''' import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class a_ : def __init__(self , __a , __a = 1_3 , __a = 6_4 , __a = 2 , __a = 3 , __a = 3 , __a = True , __a = True , __a = 1_2_8 , __a=[1_6, 3_2, 6_4, 1_2_8] , __a = 7 , __a = 4 , __a = 3_7 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 1_0 , __a = 0.02 , __a = 2 , __a = 1 , __a = 1_2_8 , __a = [2, 2, 2, 2] , __a = 2 , __a = 2 , ) -> str: """simple docstring""" __snake_case : Optional[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Optional[Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : Optional[Any] = num_channels __snake_case : Optional[Any] = is_training __snake_case : Tuple = use_labels __snake_case : Optional[int] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Tuple = intermediate_size __snake_case : List[str] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : int = encoder_stride 
__snake_case : List[str] = num_attention_outputs __snake_case : Optional[Any] = embed_dim __snake_case : Optional[Any] = embed_dim + 1 __snake_case : List[str] = resolution __snake_case : Optional[int] = depths __snake_case : List[Any] = hidden_sizes __snake_case : List[str] = dim __snake_case : Union[str, Any] = mlp_expansion_ratio def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : List[str] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = TFEfficientFormerModel(config=__a) __snake_case : int = model(__a , training=__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : Dict = self.type_sequence_label_size __snake_case : List[Any] = 
TFEfficientFormerForImageClassification(__a) __snake_case : Optional[int] = model(__a , labels=__a , training=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __snake_case : List[Any] = 1 __snake_case : List[Any] = TFEfficientFormerForImageClassification(__a) __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __snake_case : str = model(__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case ,__snake_case ,__snake_case : Union[str, Any] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class a_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _snake_case = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) _snake_case = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Dict = TFEfficientFormerModelTester(self) __snake_case : List[Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='EfficientFormer does not support input and 
output embeddings') def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case ,__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(__a) __snake_case : Union[str, Any] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[int] = [*signature.parameters.keys()] __snake_case : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a) def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__a , __a , __a): __snake_case : str = model_class(__a) __snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : Optional[Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(__a) , __a) if hasattr(self.model_tester , 'encoder_seq_length'): __snake_case : List[Any] = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1: __snake_case : str = seq_length * self.model_tester.chunk_length else: __snake_case : Optional[int] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __snake_case : List[Any] = outputs.decoder_hidden_states self.asseretIsInstance(__a , (list, tuple)) self.assertEqual(len(__a) , __a) __snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a) self.assertListEqual( list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, 
self.model_tester.hidden_size] , ) __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = True check_hidden_states_output(__a , __a , __a) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(__a , __a , __a) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int: """simple docstring""" __snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet') def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = TFEfficientFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = True __snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a) __snake_case : List[Any] = getattr(self.model_tester , 
'encoder_seq_length' , __a) __snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a) __snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'): __snake_case : str = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __snake_case : Optional[Any] = True __snake_case : Dict = False __snake_case : Optional[int] = True __snake_case : Dict = model_class(__a) __snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Dict = True __snake_case : str = model_class(__a) __snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a) __snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a) , self.model_tester.num_attention_outputs) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" __snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __snake_case : Tuple = model_class(__a) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __snake_case : Optional[Any] = { key: tf.keras.Input(shape=val.shape[1:] , 
dtype=val.dtype , name=__a) for key, val in model.input_signature.items() if key in model.dummy_inputs } __snake_case : Tuple = model(__a) self.assertTrue(outputs_dict is not None) def _SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300') __snake_case : Optional[int] = self.default_image_processor __snake_case : List[Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : List[str] = model(**__a , training=__a) # verify the logits __snake_case : str = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852]) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4)) @slow def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300') __snake_case : List[Any] = self.default_image_processor __snake_case : Union[str, Any] = prepare_img() __snake_case : List[Any] = image_processor(images=__a , return_tensors='tf') # forward pass __snake_case : Optional[int] = model(**__a , training=__a) # verify the logits __snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , __a) __snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499]) 
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
61
0
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a_ : def __init__(self , __a , __a=2 , __a=3_2 , __a=1_6 , __a=3 , __a=True , __a=True , __a=3_2 , __a=4 , __a=[0, 1, 2, 3] , __a=4 , __a=3_7 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.02 , __a=3 , __a=[1, 3_8_4, 2_4, 2_4] , __a=True , __a=None , ) -> List[str]: """simple docstring""" __snake_case : Dict = parent __snake_case : Optional[int] = batch_size __snake_case : Tuple = image_size __snake_case : Optional[int] = patch_size __snake_case : str = num_channels __snake_case : Tuple = is_training __snake_case : Dict = use_labels __snake_case : Union[str, Any] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : str = backbone_out_indices __snake_case : Tuple = num_attention_heads __snake_case : int = intermediate_size __snake_case : str = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = initializer_range __snake_case : Dict = num_labels __snake_case : Union[str, Any] = backbone_featmap_shape __snake_case : Dict = scope __snake_case : Any = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) 
__snake_case : Any = (image_size // patch_size) ** 2 __snake_case : Optional[int] = num_patches + 1 def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case : Optional[int] = None if self.use_labels: __snake_case : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) __snake_case : Tuple = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ (self) -> Tuple: """simple docstring""" __snake_case : List[str] = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8], "num_groups": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=A_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> Tuple: """simple docstring""" __snake_case : str = DPTModel(config=A_) model.to(A_) model.eval() __snake_case : List[Any] = model(A_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> int: """simple docstring""" __snake_case : int = self.num_labels __snake_case : Dict = DPTForDepthEstimation(A_) model.to(A_) model.eval() __snake_case : Dict = model(A_) 
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size)) def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> str: """simple docstring""" __snake_case : Tuple = self.num_labels __snake_case : int = DPTForSemanticSegmentation(A_) model.to(A_) model.eval() __snake_case : Dict = model(A_ , labels=A_) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size)) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" __snake_case : Tuple = self.prepare_config_and_inputs() __snake_case : Optional[Any] = config_and_inputs __snake_case : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a_ ( __snake_case , __snake_case , unittest.TestCase ): _snake_case = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _snake_case = ( { 'depth-estimation': DPTForDepthEstimation, 'feature-extraction': DPTModel, 'image-segmentation': DPTForSemanticSegmentation, } if is_torch_available() else {} ) _snake_case = False _snake_case = False _snake_case = False def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = DPTModelTester(self) __snake_case : str = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=3_7) def SCREAMING_SNAKE_CASE__ (self) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds') def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Tuple = model_class(A_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __snake_case : Optional[int] = model.get_output_embeddings() 
self.assertTrue(x is None or isinstance(A_ , nn.Linear)) def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[int] = model_class(A_) __snake_case : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Optional[Any] = [*signature.parameters.keys()] __snake_case : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , A_) def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*A_) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A_) def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]: """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[Any] = True if model_class in get_values(A_): continue __snake_case : Optional[int] = model_class(A_) model.to(A_) model.train() __snake_case : Optional[int] = self._prepare_for_class(A_ , A_ , return_labels=A_) __snake_case : Optional[int] = model(**A_).loss loss.backward() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() 
__snake_case : Any = False __snake_case : Optional[int] = True if model_class in get_values(A_) or not model_class.supports_gradient_checkpointing: continue __snake_case : str = model_class(A_) model.to(A_) model.gradient_checkpointing_enable() model.train() __snake_case : Any = self._prepare_for_class(A_ , A_ , return_labels=A_) __snake_case : Any = model(**A_).loss loss.backward() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[Any] = _config_zero_init(A_) for model_class in self.all_model_classes: __snake_case : List[str] = model_class(config=A_) # Skip the check for the backbone __snake_case : List[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __snake_case : Optional[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" pass @slow def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]: """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __snake_case : Dict = DPTModel.from_pretrained(A_) self.assertIsNotNone(A_) def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = "add" with self.assertRaises(A_): __snake_case : Optional[int] = DPTForDepthEstimation(A_) def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" __snake_case : List[Any] = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class a_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ (self) -> Any: """simple docstring""" __snake_case : int = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas') __snake_case : Optional[Any] = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(A_) __snake_case : int = prepare_img() __snake_case : Tuple = image_processor(images=A_ , return_tensors='pt').to(A_) # forward pass with torch.no_grad(): __snake_case : List[str] = model(**A_) __snake_case : int = outputs.predicted_depth # verify the predicted depth __snake_case : str = torch.Size((1, 3_8_4, 3_8_4)) self.assertEqual(predicted_depth.shape , A_) __snake_case : str = torch.tensor( [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(A_) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , A_ , atol=1E-4))
716
"""Project Euler 30: sum of all numbers expressible as the sum of fifth powers of their digits."""

# Precomputed digit -> digit**5 lookup, keyed by the digit's *character* so the
# per-number sum can iterate str(number) directly without int() conversions.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
__A = DIGITS_FIFTH_POWER  # keep the original module-level alias for backward compatibility


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of their digits' fifth powers.

    The search starts at 1000 because the problem statement excludes trivial
    single-digit sums (1 = 1**5); 6 * 9**5 = 354294 < 1_000_000 bounds the
    search space, so 1_000_000 is a safe upper limit.
    """
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
61
0
'''simple docstring''' import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch __A = logging.get_logger(__name__) @dataclass class a_ : def __init__(self , __a=False , __a=False , __a=6.0 , __a=None , __a=False , __a=False , __a=None , __a="fp4" , __a=False , **__a , ) -> Optional[Any]: """simple docstring""" __snake_case : Union[str, Any] = load_in_abit __snake_case : Union[str, Any] = load_in_abit __snake_case : int = llm_inta_threshold __snake_case : Union[str, Any] = llm_inta_skip_modules __snake_case : Union[str, Any] = llm_inta_enable_fpaa_cpu_offload __snake_case : Optional[Any] = llm_inta_has_fpaa_weight __snake_case : int = bnb_abit_quant_type __snake_case : Any = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: __snake_case : List[Any] = torch.floataa elif isinstance(__a , __a): __snake_case : Optional[Any] = getattr(__a , __a) elif isinstance(__a , torch.dtype): __snake_case : Tuple = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype') self.post_init() def SCREAMING_SNAKE_CASE__ (self) -> Dict: """simple docstring""" if not isinstance(self.llm_inta_threshold , __a): raise ValueError('llm_int8_threshold must be a float') if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __a): raise ValueError('llm_int8_skip_modules must be a list of strings') if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __a): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean') if not isinstance(self.llm_inta_has_fpaa_weight , __a): raise ValueError('llm_int8_has_fp16_weight must be a boolean') if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype') if not 
isinstance(self.bnb_abit_quant_type , __a): raise ValueError('bnb_4bit_quant_type must be a string') if not isinstance(self.bnb_abit_use_double_quant , __a): raise ValueError('bnb_4bit_use_double_quant must be a boolean') if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse( '0.39.0'): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version') def SCREAMING_SNAKE_CASE__ (self) -> List[str]: """simple docstring""" return self.load_in_abit or self.load_in_abit def SCREAMING_SNAKE_CASE__ (self) -> str: """simple docstring""" if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def SCREAMING_SNAKE_CASE__ (cls , __a , __a , **__a) -> Any: """simple docstring""" __snake_case : Any = cls(**__a) __snake_case : Dict = [] for key, value in kwargs.items(): if hasattr(__a , __a): setattr(__a , __a , __a) to_remove.append(__a) for key in to_remove: kwargs.pop(__a , __a) if return_unused_kwargs: return config, kwargs else: return config def SCREAMING_SNAKE_CASE__ (self , __a) -> Optional[Any]: """simple docstring""" with open(__a , 'w' , encoding='utf-8') as writer: __snake_case : Dict = self.to_dict() __snake_case : int = json.dumps(__a , indent=2 , sort_keys=__a) + '\n' writer.write(__a) def SCREAMING_SNAKE_CASE__ (self) -> Dict[str, Any]: """simple docstring""" __snake_case : Any = copy.deepcopy(self.__dict__) __snake_case : Tuple = str(output['bnb_4bit_compute_dtype']).split('.')[1] return output def __repr__(self) -> List[str]: """simple docstring""" return F"""{self.__class__.__name__} {self.to_json_string()}""" def SCREAMING_SNAKE_CASE__ (self , __a = True) -> str: """simple docstring""" if use_diff is True: __snake_case : Optional[int] = self.to_diff_dict() else: __snake_case : Dict = self.to_dict() 
return json.dumps(__a , indent=2 , sort_keys=__a) + "\n" def SCREAMING_SNAKE_CASE__ (self) -> Dict[str, Any]: """simple docstring""" __snake_case : List[Any] = self.to_dict() # get the default config dict __snake_case : Dict = BitsAndBytesConfig().to_dict() __snake_case : Tuple = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: __snake_case : Any = value return serializable_config_dict
717
"""Recursive and iterative traversals of a small example binary tree."""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """A binary-tree node holding an int payload and optional children."""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the fixed example tree: 1 -> (2 -> (4, 5), 3)."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Return node values in root-left-right order."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Return node values in left-right-root order."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Return node values in left-root-right order."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Return the number of levels in the tree (0 for an empty tree)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Return node values in breadth-first (level) order using a FIFO queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Return the values on ``level`` (1-based), scanned left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Return the values on ``level`` (1-based), scanned right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            # Visit the right subtree first to reverse the scan direction.
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Return per-level value lists, alternating scan direction each level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """Demonstrate every traversal on the example tree."""
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
61
0
"""Compute real and reactive power from apparent power and power factor."""
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Return the real (active) power P = S * pf.

    Args:
        apparent_power: apparent power S of the circuit.
        power_factor: cos(phi), must be a number in [-1, 1].

    Raises:
        ValueError: if ``power_factor`` is not a number between -1 and 1.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return the reactive power Q = S * sqrt(1 - pf**2).

    Args:
        apparent_power: apparent power S of the circuit.
        power_factor: cos(phi), must be a number in [-1, 1].

    Raises:
        ValueError: if ``power_factor`` is not a number between -1 and 1.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
718
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class a_ : def __init__(self , __a = None) -> None: """simple docstring""" if components is None: __snake_case : List[str] = [] __snake_case : Optional[int] = list(__a) def __len__(self) -> int: """simple docstring""" return len(self.__components) def __str__(self) -> str: """simple docstring""" return "(" + ",".join(map(__a , self.__components)) + ")" def __add__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] + other.component(__a) for i in range(__a)] return Vector(__a) else: raise Exception('must have the same size') def __sub__(self , __a) -> Vector: """simple docstring""" __snake_case : Optional[Any] = len(self) if size == len(__a): __snake_case : Optional[int] = [self.__components[i] - other.component(__a) for i in range(__a)] return Vector(__a) else: # error case raise Exception('must have the same size') @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... @overload def __mul__(self , __a) -> float: """simple docstring""" ... 
def __mul__(self , __a) -> float | Vector: """simple docstring""" if isinstance(__a , (float, int)): __snake_case : str = [c * other for c in self.__components] return Vector(__a) elif isinstance(__a , __a) and len(self) == len(__a): __snake_case : List[Any] = len(self) __snake_case : Dict = [self.__components[i] * other.component(__a) for i in range(__a)] return sum(__a) else: # error case raise Exception('invalid operand!') def SCREAMING_SNAKE_CASE__ (self) -> Vector: """simple docstring""" return Vector(self.__components) def SCREAMING_SNAKE_CASE__ (self , __a) -> float: """simple docstring""" if isinstance(__a , __a) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception('index out of range') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> None: """simple docstring""" assert -len(self.__components) <= pos < len(self.__components) __snake_case : int = value def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if len(self.__components) == 0: raise Exception('Vector is empty') __snake_case : Tuple = [c**2 for c in self.__components] return math.sqrt(sum(__a)) def SCREAMING_SNAKE_CASE__ (self , __a , __a = False) -> float: """simple docstring""" __snake_case : Tuple = self * other __snake_case : Optional[int] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def _SCREAMING_SNAKE_CASE ( A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) return Vector([0] * dimension ) def _SCREAMING_SNAKE_CASE ( A : int , A : int ) -> Vector: """simple docstring""" assert isinstance(A , A ) and (isinstance(A , A )) __snake_case : Any = [0] * dimension __snake_case : int = 1 return Vector(A ) def _SCREAMING_SNAKE_CASE ( A : float , A : Vector , A : Vector ) -> Vector: """simple docstring""" assert ( isinstance(A , A ) and isinstance(A , A ) and (isinstance(A , (int, float) )) ) return x * scalar + y def 
_SCREAMING_SNAKE_CASE ( A : int , A : int , A : int ) -> Vector: """simple docstring""" random.seed(A ) __snake_case : List[Any] = [random.randint(A , A ) for _ in range(A )] return Vector(A ) class a_ : def __init__(self , __a , __a , __a) -> None: """simple docstring""" __snake_case : Union[str, Any] = matrix __snake_case : int = w __snake_case : str = h def __str__(self) -> str: """simple docstring""" __snake_case : Dict = '' for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : Tuple = [] for i in range(self.__height): __snake_case : List[Any] = [ self.__matrix[i][j] + other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrix must have the same dimension!') def __sub__(self , __a) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __snake_case : str = [] for i in range(self.__height): __snake_case : List[str] = [ self.__matrix[i][j] - other.component(__a , __a) for j in range(self.__width) ] matrix.append(__a) return Matrix(__a , self.__width , self.__height) else: raise Exception('matrices must have the same dimension!') @overload def __mul__(self , __a) -> Matrix: """simple docstring""" ... @overload def __mul__(self , __a) -> Vector: """simple docstring""" ... 
def __mul__(self , __a) -> Vector | Matrix: """simple docstring""" if isinstance(__a , __a): # matrix-vector if len(__a) == self.__width: __snake_case : Tuple = zero_vector(self.__height) for i in range(self.__height): __snake_case : Union[str, Any] = [ self.__matrix[i][j] * other.component(__a) for j in range(self.__width) ] ans.change_component(__a , sum(__a)) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!') elif isinstance(__a , (int, float)): # matrix-scalar __snake_case : str = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(__a , self.__width , self.__height) return None def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__height def SCREAMING_SNAKE_CASE__ (self) -> int: """simple docstring""" return self.__width def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __snake_case : List[Any] = value else: raise Exception('change_component: indices out of bounds') def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') __snake_case : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__a)): __snake_case : Tuple = minor[i][:y] + minor[i][y + 1 :] return Matrix(__a , self.__width - 1 , self.__height - 1).determinant() def SCREAMING_SNAKE_CASE__ (self , __a , __a) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__a , __a) else: raise Exception('Indices out 
of bounds') def SCREAMING_SNAKE_CASE__ (self) -> float: """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square') if self.__height < 1: raise Exception('Matrix has no element') elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __snake_case : Any = [ self.__matrix[0][y] * self.cofactor(0 , __a) for y in range(self.__width) ] return sum(__a) def _SCREAMING_SNAKE_CASE ( A : int ) -> Matrix: """simple docstring""" __snake_case : list[list[float]] = [[0] * n for _ in range(A )] return Matrix(A , A , A ) def _SCREAMING_SNAKE_CASE ( A : int , A : int , A : int , A : int ) -> Matrix: """simple docstring""" random.seed(A ) __snake_case : list[list[float]] = [ [random.randint(A , A ) for _ in range(A )] for _ in range(A ) ] return Matrix(A , A , A )
61
0