Dataset schema:
  code                     string  (lengths 87 – 55.2k)
  code_codestyle           int64   (0 – 349)
  style_context            string  (lengths 135 – 49.1k)
  style_context_codestyle  int64   (0 – 349)
  label                    int64   (0 – 1)
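The rows below follow this schema. As a minimal sketch of how such a dump could be loaded for inspection (assuming it is stored as Parquet; "train.parquet" is a placeholder path, not a name from the source):

```python
# Sketch: load the dump and inspect the schema above.
# "train.parquet" is a placeholder; substitute the real shard path.
import pandas as pd

df = pd.read_parquet("train.parquet")
print(df.dtypes)                   # code / style_context: object, others: int64
print(df["label"].value_counts())  # binary label, 0 or 1
print(df.loc[0, "code"][:200])     # preview the first code sample
```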
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) # General docstring __UpperCamelCase : int = """MobileNetV1Config""" # Base docstring __UpperCamelCase : str = """google/mobilenet_v1_1.0_224""" __UpperCamelCase : str = [1, 1024, 7, 7] # Image classification docstring __UpperCamelCase : Union[str, Any] = """google/mobilenet_v1_1.0_224""" __UpperCamelCase : Optional[Any] = """tabby, tabby cat""" __UpperCamelCase : Union[str, Any] = [ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def a_ ( _A , _A , _A=None ) -> List[Any]: """simple docstring""" snake_case__ = {} if isinstance(_A , _A ): snake_case__ = model.mobilenet_va else: snake_case__ = model snake_case__ = 'MobilenetV1/Conv2d_0/' snake_case__ = backbone.conv_stem.convolution.weight snake_case__ = backbone.conv_stem.normalization.bias snake_case__ = backbone.conv_stem.normalization.weight snake_case__ = backbone.conv_stem.normalization.running_mean snake_case__ = backbone.conv_stem.normalization.running_var for i in range(13 ): snake_case__ = i + 1 snake_case__ = i * 2 snake_case__ = backbone.layer[pt_index] snake_case__ = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' snake_case__ = pointer.convolution.weight snake_case__ = pointer.normalization.bias snake_case__ = pointer.normalization.weight snake_case__ = pointer.normalization.running_mean snake_case__ = pointer.normalization.running_var snake_case__ = backbone.layer[pt_index + 1] snake_case__ = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' snake_case__ = pointer.convolution.weight snake_case__ = pointer.normalization.bias snake_case__ = pointer.normalization.weight snake_case__ = pointer.normalization.running_mean snake_case__ = pointer.normalization.running_var if isinstance(_A , _A ): snake_case__ = 'MobilenetV1/Logits/Conv2d_1c_1x1/' snake_case__ = model.classifier.weight snake_case__ = model.classifier.bias return tf_to_pt_map def a_ ( _A , _A , _A ) -> Tuple: """simple docstring""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model snake_case__ = tf.train.list_variables(_A ) snake_case__ = {} for name, shape in init_vars: logger.info(f'''Loading TF weight {name} with shape {shape}''' ) snake_case__ = tf.train.load_variable(_A , _A ) snake_case__ = array # Build TF to PyTorch weights loading map snake_case__ = _build_tf_to_pytorch_map(_A , _A , _A ) for name, pointer in tf_to_pt_map.items(): logger.info(f'''Importing {name}''' ) if name not in tf_weights: logger.info(f'''{name} not in tf pre-trained weights, skipping''' ) continue snake_case__ = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) snake_case__ = np.transpose(_A , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer snake_case__ = array.squeeze().transpose() else: snake_case__ = np.transpose(_A , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' ) snake_case__ = torch.from_numpy(_A ) tf_weights.pop(_A , _A ) tf_weights.pop(name + '/RMSProp' , _A ) tf_weights.pop(name + '/RMSProp_1' , _A ) tf_weights.pop(name + '/ExponentialMovingAverage' , _A ) logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' ) return model def a_ ( _A , _A ) -> torch.Tensor: """simple docstring""" snake_case__ , snake_case__ = features.shape[-2:] snake_case__ , snake_case__ = conv_layer.stride snake_case__ , snake_case__ = conv_layer.kernel_size if in_height % stride_height == 0: snake_case__ = max(kernel_height - stride_height , 0 ) else: snake_case__ = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: snake_case__ = max(kernel_width - stride_width , 0 ) else: snake_case__ = max(kernel_width - (in_width % stride_width) , 0 ) snake_case__ = pad_along_width // 2 snake_case__ = pad_along_width - pad_left snake_case__ = pad_along_height // 2 snake_case__ = pad_along_height - pad_top snake_case__ = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_A , _A , 'constant' , 0.0 ) class __SCREAMING_SNAKE_CASE( nn.Module ): def __init__( self: Dict , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ) -> None: super().__init__() snake_case__ = config if in_channels % groups != 0: raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) snake_case__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) snake_case__ = nn.Convad( in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode='zeros' , ) if use_normalization: snake_case__ = nn.BatchNormad( num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , ) else: snake_case__ = None if use_activation: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = ACTaFN[use_activation] elif isinstance(config.hidden_act , UpperCamelCase 
): snake_case__ = ACTaFN[config.hidden_act] else: snake_case__ = config.hidden_act else: snake_case__ = None def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: torch.Tensor ) -> torch.Tensor: if self.config.tf_padding: snake_case__ = apply_tf_padding(UpperCamelCase , self.convolution ) snake_case__ = self.convolution(UpperCamelCase ) if self.normalization is not None: snake_case__ = self.normalization(UpperCamelCase ) if self.activation is not None: snake_case__ = self.activation(UpperCamelCase ) return features class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = MobileNetVaConfig _UpperCAmelCase = load_tf_weights_in_mobilenet_va _UpperCAmelCase = "mobilenet_v1" _UpperCAmelCase = "pixel_values" _UpperCAmelCase = False def lowerCAmelCase_ ( self: str , UpperCamelCase: Union[nn.Linear, nn.Convad] ) -> None: if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __UpperCamelCase : Union[str, Any] = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ __UpperCamelCase : str = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, a_ , ) class __SCREAMING_SNAKE_CASE( a_ ): def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ) -> Tuple: super().__init__(UpperCamelCase ) snake_case__ = config snake_case__ = 32 snake_case__ = max(int(depth * config.depth_multiplier ) , config.min_depth ) snake_case__ = MobileNetVaConvLayer( UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , ) snake_case__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] snake_case__ = nn.ModuleList() for i in range(13 ): snake_case__ = out_channels if strides[i] == 2 or i == 0: depth *= 2 snake_case__ = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) ) self.layer.append( MobileNetVaConvLayer( UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) ) snake_case__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict ) -> Union[str, Any]: raise NotImplementedError @add_start_docstrings_to_model_forward(UpperCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: snake_case__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) snake_case__ = self.conv_stem(UpperCamelCase ) snake_case__ = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): snake_case__ = layer_module(UpperCamelCase ) if output_hidden_states: snake_case__ = all_hidden_states + (hidden_states,) snake_case__ = hidden_states if self.pooler is not None: snake_case__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 ) else: snake_case__ = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , a_ , ) class __SCREAMING_SNAKE_CASE( a_ ): def __init__( self: List[Any] , UpperCamelCase: MobileNetVaConfig ) -> None: super().__init__(UpperCamelCase ) snake_case__ = config.num_labels snake_case__ = MobileNetVaModel(UpperCamelCase ) snake_case__ = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head snake_case__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase ) snake_case__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict snake_case__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase ) snake_case__ = outputs.pooler_output if return_dict else outputs[1] snake_case__ = self.classifier(self.dropout(UpperCamelCase ) ) snake_case__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: snake_case__ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): snake_case__ = 'single_label_classification' else: snake_case__ = 'multi_label_classification' if self.config.problem_type == "regression": snake_case__ = MSELoss() if self.num_labels == 1: snake_case__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: snake_case__ = loss_fct(UpperCamelCase , UpperCamelCase ) elif self.config.problem_type == "single_label_classification": snake_case__ = CrossEntropyLoss() snake_case__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": snake_case__ = BCEWithLogitsLoss() snake_case__ = loss_fct(UpperCamelCase , UpperCamelCase ) if not return_dict: snake_case__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
code_codestyle: 307
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""") __UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def a_ ( ) -> int: """simple docstring""" with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def a_ ( ) -> List[str]: """simple docstring""" snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a_ ( ) -> Dict: """simple docstring""" snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() snake_case__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def a_ ( ) -> Optional[int]: """simple docstring""" assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() def a_ ( ) -> Optional[Any]: """simple docstring""" # laplace diagonals snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) snake_case__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def a_ ( ) -> Dict: """simple docstring""" assert med.median_filter(_A , 3 ).any() def a_ ( ) -> Dict: """simple docstring""" snake_case__ , snake_case__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = sp.make_sepia(_A , 20 ) assert sepia.all() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]: """simple docstring""" snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]: """simple docstring""" snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def a_ ( ) -> Any: """simple docstring""" snake_case__ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
snake_case__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None snake_case__ = 0 snake_case__ = 0 snake_case__ = image[x_coordinate][y_coordinate] snake_case__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image snake_case__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): snake_case__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
style_context_codestyle: 307
label: 1
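The `apply_tf_padding` helper in the MobileNetV1 sample above reproduces TensorFlow's "SAME" padding, which pads just enough for the output size to be `ceil(input / stride)` and puts any odd pixel on the bottom/right. A standalone sketch of that rule (a condensed version for illustration, not the sample's code):

```python
# TensorFlow-style "SAME" padding for a PyTorch Conv2d; a condensed
# sketch of the rule the sample's apply_tf_padding helper implements.
import torch
from torch import nn

def same_pad(features: torch.Tensor, conv: nn.Conv2d) -> torch.Tensor:
    in_h, in_w = features.shape[-2:]
    s_h, s_w = conv.stride
    k_h, k_w = conv.kernel_size
    pad_h = max(k_h - s_h, 0) if in_h % s_h == 0 else max(k_h - in_h % s_h, 0)
    pad_w = max(k_w - s_w, 0) if in_w % s_w == 0 else max(k_w - in_w % s_w, 0)
    # TF splits uneven padding so the extra pixel lands on the bottom/right.
    return nn.functional.pad(
        features, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)
    )

x = torch.randn(1, 3, 224, 224)
conv = nn.Conv2d(3, 32, kernel_size=3, stride=2)
print(conv(same_pad(x, conv)).shape)  # torch.Size([1, 32, 112, 112])
```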
from collections.abc import Sequence from queue import Queue class __SCREAMING_SNAKE_CASE: def __init__( self: Dict , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Dict=None , UpperCamelCase: Union[str, Any]=None ) -> Tuple: snake_case__ = start snake_case__ = end snake_case__ = val snake_case__ = (start + end) // 2 snake_case__ = left snake_case__ = right def __repr__( self: Optional[Any] ) -> Union[str, Any]: return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class __SCREAMING_SNAKE_CASE: def __init__( self: Any , UpperCamelCase: Sequence , UpperCamelCase: int ) -> str: snake_case__ = collection snake_case__ = function if self.collection: snake_case__ = self._build_tree(0 , len(UpperCamelCase ) - 1 ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any ) -> int: self._update_tree(self.root , UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Union[str, Any] , UpperCamelCase: int ) -> int: return self._query_range(self.root , UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Optional[Any] ) -> Tuple: if start == end: return SegmentTreeNode(UpperCamelCase , UpperCamelCase , self.collection[start] ) snake_case__ = (start + end) // 2 snake_case__ = self._build_tree(UpperCamelCase , UpperCamelCase ) snake_case__ = self._build_tree(mid + 1 , UpperCamelCase ) return SegmentTreeNode(UpperCamelCase , UpperCamelCase , self.fn(left.val , right.val ) , UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: int , UpperCamelCase: List[Any] , UpperCamelCase: List[Any] ) -> Tuple: if node.start == i and node.end == i: snake_case__ = val return if i <= node.mid: self._update_tree(node.left , UpperCamelCase , UpperCamelCase ) else: self._update_tree(node.right , UpperCamelCase , UpperCamelCase ) snake_case__ = self.fn(node.left.val , node.right.val ) def lowerCAmelCase_ ( self: str , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] ) -> List[str]: if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , UpperCamelCase , UpperCamelCase ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , UpperCamelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase ) , ) else: # range in right child tree return self._query_range(node.right , UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[int] ) -> Any: if self.root is not None: snake_case__ = Queue() queue.put(self.root ) while not queue.empty(): snake_case__ = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("""*""" * 50) __UpperCamelCase : Optional[Any] = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
code_codestyle: 307
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 307
label: 1
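The segment tree sample in this row builds an explicit node tree and recurses for updates and queries. For comparison, here is a compact array-backed variant specialized to range sums; a sketch of an alternative layout, not code from the source (note it uses half-open `[left, right)` queries, unlike the sample's inclusive ranges):

```python
# Array-backed segment tree for range sums: leaves live at tree[n:],
# each internal node i holds tree[2*i] + tree[2*i + 1].
class ArraySegmentTree:
    def __init__(self, data: list[int]) -> None:
        self.n = len(data)
        self.tree = [0] * (2 * self.n)
        self.tree[self.n:] = data
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]

    def update(self, index: int, value: int) -> None:
        index += self.n
        self.tree[index] = value
        while index > 1:
            index //= 2
            self.tree[index] = self.tree[2 * index] + self.tree[2 * index + 1]

    def query(self, left: int, right: int) -> int:
        """Sum over the half-open range [left, right)."""
        result = 0
        left += self.n
        right += self.n
        while left < right:
            if left % 2:
                result += self.tree[left]
                left += 1
            if right % 2:
                right -= 1
                result += self.tree[right]
            left //= 2
            right //= 2
        return result

tree = ArraySegmentTree([2, 1, 5, 3, 4])
print(tree.query(1, 4))  # 9, i.e. 1 + 5 + 3
```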
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "t5" _UpperCAmelCase = ["past_key_values"] _UpperCAmelCase = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self: List[Any] , UpperCamelCase: str=3_21_28 , UpperCamelCase: Dict=5_12 , UpperCamelCase: str=64 , UpperCamelCase: Optional[int]=20_48 , UpperCamelCase: Optional[int]=6 , UpperCamelCase: Tuple=None , UpperCamelCase: Union[str, Any]=8 , UpperCamelCase: Dict=32 , UpperCamelCase: int=1_28 , UpperCamelCase: Dict=0.1 , UpperCamelCase: Tuple=1e-6 , UpperCamelCase: Any=1.0 , UpperCamelCase: Tuple="relu" , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: List[Any]=0 , UpperCamelCase: List[str]=1 , **UpperCamelCase: int , ) -> Optional[int]: snake_case__ = vocab_size snake_case__ = d_model snake_case__ = d_kv snake_case__ = d_ff snake_case__ = num_layers snake_case__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry snake_case__ = num_heads snake_case__ = relative_attention_num_buckets snake_case__ = relative_attention_max_distance snake_case__ = dropout_rate snake_case__ = layer_norm_epsilon snake_case__ = initializer_factor snake_case__ = feed_forward_proj snake_case__ = use_cache snake_case__ = self.feed_forward_proj.split('-' ) snake_case__ = act_info[-1] snake_case__ = act_info[0] == 'gated' if len(UpperCamelCase ) > 1 and act_info[0] != "gated" or len(UpperCamelCase ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": snake_case__ = 'gelu_new' super().__init__( pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , **UpperCamelCase , ) class __SCREAMING_SNAKE_CASE( a_ ): @property def lowerCAmelCase_ ( self: Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: snake_case__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: snake_case__ = 'past_encoder_sequence + sequence' snake_case__ = {0: 'batch'} snake_case__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: snake_case__ = {0: 'batch', 1: 'decoder_sequence'} snake_case__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction='inputs' ) return common_inputs @property def lowerCAmelCase_ ( self: Optional[int] ) -> int: return 13
code_codestyle: 307
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
style_context_codestyle: 307
label: 1
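The T5 configuration above splits `feed_forward_proj` on "-" to derive the dense activation name and a gated flag, mapping the legacy "gated-gelu" to "gelu_new". A quick check of that parsing, assuming a recent `transformers` release where `dense_act_fn` and `is_gated_act` are exposed:

```python
# Verify how feed_forward_proj is parsed; assumes transformers is
# installed and recent enough to expose dense_act_fn / is_gated_act.
from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
print(config.dense_act_fn, config.is_gated_act)  # gelu_new True
```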
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = DanceDiffusionPipeline _UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS _UpperCAmelCase = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } _UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS _UpperCAmelCase = False _UpperCAmelCase = False def lowerCAmelCase_ ( self: str ) -> Tuple: torch.manual_seed(0 ) snake_case__ = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=UpperCamelCase , use_timestep_embedding=UpperCamelCase , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , ) snake_case__ = IPNDMScheduler() snake_case__ = { 'unet': unet, 'scheduler': scheduler, } return components def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Dict , UpperCamelCase: List[str]=0 ) -> Tuple: if str(UpperCamelCase ).startswith('mps' ): snake_case__ = torch.manual_seed(UpperCamelCase ) else: snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) snake_case__ = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 4, } return inputs def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case__ = self.get_dummy_components() snake_case__ = DanceDiffusionPipeline(**UpperCamelCase ) snake_case__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = self.get_dummy_inputs(UpperCamelCase ) snake_case__ = pipe(**UpperCamelCase ) snake_case__ = output.audios snake_case__ = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) snake_case__ = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def lowerCAmelCase_ ( self: Tuple ) -> str: return super().test_save_load_local() @skip_mps def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) @skip_mps def lowerCAmelCase_ ( self: Tuple ) -> str: return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase_ ( self: str ) -> List[Any]: return super().test_attention_slicing_forward_pass() def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: int ) -> Any: snake_case__ = torch_device snake_case__ = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ) snake_case__ 
= pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = torch.manual_seed(0 ) snake_case__ = pipe(generator=UpperCamelCase , num_inference_steps=1_00 , audio_length_in_s=4.096 ) snake_case__ = output.audios snake_case__ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) snake_case__ = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case__ = torch_device snake_case__ = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa ) snake_case__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = torch.manual_seed(0 ) snake_case__ = pipe(generator=UpperCamelCase , num_inference_steps=1_00 , audio_length_in_s=4.096 ) snake_case__ = output.audios snake_case__ = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) snake_case__ = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
code_codestyle: 307
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place and return it (Fisher-Yates algorithm)."""
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
style_context_codestyle: 307
label: 1
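A quick empirical check that `fisher_yates_shuffle` above is unbiased: with three elements, each of the six permutations should appear about one sixth of the time.

```python
# Count permutation frequencies produced by fisher_yates_shuffle (defined
# in the sample above); each of the 6 orderings should come out near 1/6.
import random
from collections import Counter

random.seed(0)
counts = Counter(tuple(fisher_yates_shuffle([0, 1, 2])) for _ in range(60_000))
for perm, n in sorted(counts.items()):
    print(perm, round(n / 60_000, 3))  # each value should be close to 0.167
```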
def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pithon")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 307
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; lower number = higher priority."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [[], [], []]

    def enqueue(self, priority: int, data: int) -> None:
        if priority not in (0, 1, 2):
            raise ValueError("Valid priorities are 0, 1, and 2")
        if len(self.queues[priority]) >= 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queues[priority].append(data)

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue that always dequeues the smallest element."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    for _ in range(5):
        print(fpq.dequeue())
    print(fpq)
    for _ in range(5):
        print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    for value in [10, 70, 100, 1, 5, 7, 4, 64, 128]:
        epq.enqueue(value)
    print(epq)
    for _ in range(5):
        print(epq.dequeue())
    print(epq)
    for _ in range(5):
        print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
style_context_codestyle: 307
label: 1
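`ElementPriorityQueue` above scans the whole list with `min` on every dequeue, which is O(n). The standard library's `heapq` gives the same smallest-first behavior in O(log n) per operation; a sketch of that alternative:

```python
# Heap-backed alternative to ElementPriorityQueue; same smallest-first
# semantics, O(log n) enqueue/dequeue instead of an O(n) min-scan.
import heapq

class HeapPriorityQueue:
    def __init__(self) -> None:
        self._heap: list[int] = []

    def enqueue(self, item: int) -> None:
        heapq.heappush(self._heap, item)

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)

pq = HeapPriorityQueue()
for value in [10, 70, 1, 5]:
    pq.enqueue(value)
print([pq.dequeue() for _ in range(4)])  # [1, 5, 10, 70]
```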
import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = RoFormerTokenizer _UpperCAmelCase = RoFormerTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = True def lowerCAmelCase_ ( self: int ) -> Tuple: super().setUp() def lowerCAmelCase_ ( self: Tuple , **UpperCamelCase: List[Any] ) -> int: return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple , **UpperCamelCase: Dict ) -> str: return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any ) -> Optional[Any]: snake_case__ = '永和服装饰品有限公司,今天天气非常好' snake_case__ = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = self.get_tokenizer() snake_case__ , snake_case__ = self.get_chinese_input_output_texts() snake_case__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , output_text.split() ) snake_case__ = tokens + [tokenizer.unk_token] snake_case__ = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case__ = self.get_rust_tokenizer() snake_case__ , snake_case__ = self.get_chinese_input_output_texts() snake_case__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , output_text.split() ) snake_case__ = tokens + [tokenizer.unk_token] snake_case__ = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]: pass def lowerCAmelCase_ ( self: str ) -> Any: pass
code_codestyle: 307
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["image_processor", "tokenizer"] _UpperCAmelCase = "LayoutLMv2ImageProcessor" _UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase , ) snake_case__ = kwargs.pop('feature_extractor' ) snake_case__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case__ = features['words'] snake_case__ = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) # add pixel values snake_case__ = features.pop('pixel_values' ) if return_overflowing_tokens is True: snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] ) snake_case__ = images return encoded_inputs def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' ) return images_with_overflow def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]: return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCAmelCase_ ( self: str ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCAmelCase_ ( self: Any ) -> List[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , ) return self.image_processor
style_context_codestyle: 307
label: 1
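A minimal usage sketch of the tokenizer the RoFormer test above exercises; it downloads the `junnyu/roformer_chinese_base` checkpoint, so network access and the `rjieba` dependency are assumed:

```python
# Tokenize the test sentence with the slow RoFormer tokenizer; requires
# transformers with rjieba installed and network access for the download.
from transformers import RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("永和服装饰品有限公司,今天天气非常好"))
```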
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def a_ ( _A ) -> int: """simple docstring""" snake_case__ = 384 snake_case__ = 7 if "tiny" in model_name: snake_case__ = 96 snake_case__ = (2, 2, 6, 2) snake_case__ = (3, 6, 12, 24) elif "small" in model_name: snake_case__ = 96 snake_case__ = (2, 2, 18, 2) snake_case__ = (3, 6, 12, 24) elif "base" in model_name: snake_case__ = 128 snake_case__ = (2, 2, 18, 2) snake_case__ = (4, 8, 16, 32) snake_case__ = 12 snake_case__ = 512 elif "large" in model_name: snake_case__ = 192 snake_case__ = (2, 2, 18, 2) snake_case__ = (6, 12, 24, 48) snake_case__ = 12 snake_case__ = 768 # set label information snake_case__ = 150 snake_case__ = 'huggingface/label-files' snake_case__ = 'ade20k-id2label.json' snake_case__ = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) snake_case__ = {int(_A ): v for k, v in idalabel.items()} snake_case__ = {v: k for k, v in idalabel.items()} snake_case__ = SwinConfig( embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) snake_case__ = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def a_ ( _A ) -> Optional[Any]: """simple docstring""" snake_case__ = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', 
f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def a_ ( _A , _A , _A ) -> Optional[Any]: """simple docstring""" snake_case__ = dct.pop(_A ) snake_case__ = val def a_ ( _A , _A ) -> Tuple: """simple docstring""" snake_case__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): snake_case__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) snake_case__ = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' ) snake_case__ = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case__ = in_proj_weight[:dim, :] snake_case__ = in_proj_bias[: dim] snake_case__ = in_proj_weight[ dim : dim * 2, : ] snake_case__ = in_proj_bias[ dim : dim * 2 ] snake_case__ = in_proj_weight[ -dim :, : ] snake_case__ = in_proj_bias[-dim :] # fmt: on def a_ ( _A ) -> Union[str, Any]: """simple docstring""" snake_case__ , snake_case__ = x.shape snake_case__ = x.reshape(_A , 4 , in_channel // 4 ) snake_case__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A ) return x def a_ ( _A ) -> Optional[Any]: """simple docstring""" snake_case__ , snake_case__ = x.shape snake_case__ = x.reshape(_A , in_channel // 4 , 4 ) snake_case__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A ) return x def a_ ( _A ) -> Any: """simple docstring""" snake_case__ = x.shape[0] snake_case__ = x.reshape(4 , in_channel // 4 ) snake_case__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A ) return x def a_ ( _A ) -> List[str]: """simple docstring""" snake_case__ = x.shape[0] snake_case__ = x.reshape(in_channel // 4 , 4 ) snake_case__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A ) return x def a_ ( _A , _A , _A ) -> Optional[int]: """simple docstring""" snake_case__ = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 
'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } snake_case__ = model_name_to_url[model_name] snake_case__ = torch.hub.load_state_dict_from_url(_A , map_location='cpu' , file_name=_A )[ 'state_dict' ] for name, param in state_dict.items(): print(_A , param.shape ) snake_case__ = get_upernet_config(_A ) snake_case__ = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): snake_case__ = state_dict.pop(_A ) if "bn" in key: snake_case__ = key.replace('bn' , 'batch_norm' ) snake_case__ = val # rename keys snake_case__ = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) read_in_q_k_v(_A , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: snake_case__ = reverse_correct_unfold_reduction_order(_A ) if "norm" in key: snake_case__ = reverse_correct_unfold_norm_order(_A ) model.load_state_dict(_A ) # verify on image snake_case__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' snake_case__ = Image.open(requests.get(_A , stream=_A ).raw ).convert('RGB' ) snake_case__ = SegformerImageProcessor() snake_case__ = processor(_A , return_tensors='pt' ).pixel_values with torch.no_grad(): snake_case__ = model(_A ) snake_case__ = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": snake_case__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": snake_case__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": snake_case__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": snake_case__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_A ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_A ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __UpperCamelCase : Optional[int] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
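# Hedged aside (not part of the conversion script above): a toy view of the column
# permutation applied by the "fix downsample parameters" step. mmseg and the HF Swin
# port unfold the 2x2 patch-merging window in different orders, so the reduction
# weight columns are regrouped from blocks [0, 1, 2, 3] to [0, 2, 1, 3].
import torch


def reorder_reduction(w: torch.Tensor) -> torch.Tensor:
    out_channel, in_channel = w.shape
    w = w.reshape(out_channel, 4, in_channel // 4)
    return w[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)


toy = torch.arange(8.0).reshape(2, 4)  # in_channel = 4 -> four blocks of size 1
print(reorder_reduction(toy))  # each row becomes [a0, a2, a1, a3]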
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below ``n``."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
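# Hedged aside (not part of the original file): the same total has a closed form via
# the arithmetic-series formula, which makes a handy cross-check for the loop above.
def sum_of_multiples_below(limit: int = 1000) -> int:
    def series(k: int) -> int:
        terms = (limit - 1) // k
        return k * terms * (terms + 1) // 2

    # inclusion-exclusion: multiples of 3, plus multiples of 5, minus multiples of 15
    return series(3) + series(5) - series(15)


assert sum_of_multiples_below(1000) == 233168 == solution(1000)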
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE: def __init__( self: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple=13 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: str=3 , UpperCamelCase: int=4 , UpperCamelCase: Union[str, Any]=[10, 20, 30, 40] , UpperCamelCase: List[str]=[2, 2, 3, 2] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[str]=True , UpperCamelCase: Dict=37 , UpperCamelCase: str="gelu" , UpperCamelCase: List[str]=10 , UpperCamelCase: Optional[Any]=0.02 , UpperCamelCase: Dict=["stage2", "stage3", "stage4"] , UpperCamelCase: str=3 , UpperCamelCase: int=None , ) -> List[Any]: snake_case__ = parent snake_case__ = batch_size snake_case__ = image_size snake_case__ = num_channels snake_case__ = num_stages snake_case__ = hidden_sizes snake_case__ = depths snake_case__ = is_training snake_case__ = use_labels snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = out_features snake_case__ = num_labels snake_case__ = scope snake_case__ = num_stages def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: Optional[int] ) -> List[str]: return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowerCAmelCase_ ( self: Optional[Any] ) -> int: return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: Tuple ) -> Union[str, Any]: snake_case__ = UperNetForSemanticSegmentation(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs 
snake_case__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , unittest.TestCase ): _UpperCAmelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else () _UpperCAmelCase = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case__ = UperNetModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: return def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = model_class(UpperCamelCase ) snake_case__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ = [*signature.parameters.keys()] snake_case__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def lowerCAmelCase_ ( self: Tuple ) -> Any: pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def lowerCAmelCase_ ( self: Optional[int] ) -> str: pass @unittest.skip(reason='UperNet does not have a base model' ) def lowerCAmelCase_ ( self: int ) -> int: pass @unittest.skip(reason='UperNet does not have a base model' ) def lowerCAmelCase_ ( self: List[str] ) -> Any: pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[Any] ) -> List[str]: def check_hidden_states_output(UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] ): snake_case__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): snake_case__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: int ) -> Optional[Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = _config_zero_init(UpperCamelCase ) snake_case__ = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: snake_case__ = model_class(config=UpperCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip(reason='UperNet does not have tied weights' ) def lowerCAmelCase_ ( self: Any ) -> Any: pass @slow def lowerCAmelCase_ ( self: Dict ) -> Any: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def a_ ( ) -> Optional[int]: """simple docstring""" snake_case__ = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) snake_case__ = Image.open(_A ).convert('RGB' ) return image @require_torch @require_vision @slow class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case__ = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) snake_case__ = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(UpperCamelCase ) snake_case__ = prepare_img() snake_case__ = processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase ) with torch.no_grad(): snake_case__ = model(**UpperCamelCase ) snake_case__ = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) snake_case__ = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) ) def lowerCAmelCase_ ( self: int ) -> Union[str, Any]: snake_case__ = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) snake_case__ = 
UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(UpperCamelCase ) snake_case__ = prepare_img() snake_case__ = processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase ) with torch.no_grad(): snake_case__ = model(**UpperCamelCase ) snake_case__ = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) snake_case__ = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in ``num.txt``."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
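# Hedged illustration (sample values, not the real num.txt): the [:10] slice above is
# exact because Python integers are arbitrary precision -- the full sum is computed
# first and only then truncated to its leading digits.
sample = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
]
print(str(sum(sample))[:10])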
__UpperCamelCase : str = """Input must be a string of 8 numbers plus letter""" __UpperCamelCase : List[Any] = """TRWAGMYFPDXBNJZSQVHLCKE""" def a_ ( _A ) -> bool: """simple docstring""" if not isinstance(_A , _A ): snake_case__ = f'''Expected string as input, found {type(_A ).__name__}''' raise TypeError(_A ) snake_case__ = spanish_id.replace('-' , '' ).upper() if len(_A ) != 9: raise ValueError(_A ) try: snake_case__ = int(spanish_id_clean[0:8] ) snake_case__ = spanish_id_clean[8] except ValueError as ex: raise ValueError(_A ) from ex if letter.isdigit(): raise ValueError(_A ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
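# Hedged usage example (the ID below is the usual documentation sample, not a real
# person's): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678Z" checks out.
assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")  # hyphens are stripped before validation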
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():  # name reconstructed; the original context-manager name was obfuscated
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
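# Hedged usage example (not in the original file; `hide` is the reconstructed name of
# the context manager above): the cursor is restored even if the wrapped block raises.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1)  # cursor is hidden while this runs
    # cursor is visible again here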
def is_palindrome(n: int) -> bool:
    """Return True if ``n`` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Return ``n`` plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the Lychrel candidates below ``limit`` (Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
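# Hedged quick check (not in the original file): 47 resolves in one step
# (47 + 74 = 121, a palindrome), while 196 is the classic Lychrel candidate that
# never settles, so it survives the 50-iteration cutoff used above.
assert is_palindrome(sum_reverse(47))
candidate = 196
for _ in range(50):
    candidate = sum_reverse(candidate)
assert not is_palindrome(candidate)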
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Any = { """7B""": 11008, """13B""": 13824, """30B""": 17920, """65B""": 22016, """70B""": 28672, } __UpperCamelCase : Optional[Any] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def a_ ( _A , _A=1 , _A=256 ) -> str: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def a_ ( _A ) -> int: """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def a_ ( _A , _A ) -> int: """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def a_ ( _A , _A , _A , _A=True ) -> List[str]: """simple docstring""" os.makedirs(_A , exist_ok=_A ) snake_case__ = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) snake_case__ = read_json(os.path.join(_A , 'params.json' ) ) snake_case__ = NUM_SHARDS[model_size] snake_case__ = params['n_layers'] snake_case__ = params['n_heads'] snake_case__ = n_heads // num_shards snake_case__ = params['dim'] snake_case__ = dim // n_heads snake_case__ = 10000.0 snake_case__ = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: snake_case__ = params['n_kv_heads'] # for GQA / MQA snake_case__ = n_heads_per_shard // num_key_value_heads snake_case__ = dim // num_key_value_heads else: # compatibility with other checkpoints snake_case__ = n_heads snake_case__ = n_heads_per_shard snake_case__ = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
snake_case__ = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded snake_case__ = [ torch.load(os.path.join(_A , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' ) for i in range(_A ) ] snake_case__ = 0 snake_case__ = {'weight_map': {}} for layer_i in range(_A ): snake_case__ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. snake_case__ = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) snake_case__ = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = inv_freq for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) snake_case__ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: snake_case__ = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( 
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs snake_case__ = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) snake_case__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 snake_case__ = params['multiple_of'] if 'multiple_of' in params else 256 snake_case__ = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) snake_case__ = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def a_ ( _A , _A ) -> Tuple: """simple docstring""" # Initialize the tokenizer based on the `spm` model snake_case__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) snake_case__ = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def a_ ( ) -> str: """simple docstring""" snake_case__ = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) snake_case__ = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) snake_case__ = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
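# Hedged aside (not part of the conversion script above): the `permute` helper exists
# because the original checkpoint lays out the rotary-embedding halves per head
# differently from what the HF port expects (an interpretation of the layout
# difference, not stated in this script). A toy round trip with the inverse
# permutation shows the view/transpose/reshape sequence is lossless.
import torch


def permute_for_rope(w: torch.Tensor, n_heads: int) -> torch.Tensor:
    dim1, dim2 = w.shape
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)


def unpermute(w: torch.Tensor, n_heads: int) -> torch.Tensor:
    dim1, dim2 = w.shape
    return w.view(n_heads, 2, dim1 // n_heads // 2, dim2).transpose(1, 2).reshape(dim1, dim2)


toy = torch.randn(8, 8)  # e.g. 2 heads with head_dim 4
assert torch.equal(unpermute(permute_for_rope(toy, 2), 2), toy)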
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
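# Hedged sketch of the lazy-import idea used above (a toy stand-in, not the actual
# transformers._LazyModule implementation): attribute access triggers the real import
# of the submodule that defines the symbol, and the result is cached on the module.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value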
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# `get_character` below relies on these aliases; the arrow codes form a contiguous range.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Return raw characters from stdin (or the Windows console)."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Read one key press and translate escape sequences to KEYMAP codes."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
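# Hedged aside (not part of the original file): arrow keys are separated from
# printable characters by OR-ing in a flag bit just above the ASCII range, so the raw
# escape-sequence byte can always be recovered by subtracting the flag back off.
assert ARROW_KEY_FLAG == 256
assert KEYMAP["up"] - ARROW_KEY_FLAG == ord("A")  # "ESC [ A" is the terminal "up" sequence
assert KEYMAP["arrow_begin"] <= KEYMAP["up"] <= KEYMAP["arrow_end"]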
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE: def __init__( self: Any , UpperCamelCase: str , UpperCamelCase: str=13 , UpperCamelCase: Optional[Any]=7 , UpperCamelCase: str=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Any=True , UpperCamelCase: Tuple=99 , UpperCamelCase: Tuple=16 , UpperCamelCase: Dict=36 , UpperCamelCase: Union[str, Any]=6 , UpperCamelCase: Any=6 , UpperCamelCase: Dict=6 , UpperCamelCase: Optional[int]=37 , UpperCamelCase: Tuple="gelu" , UpperCamelCase: int=0.1 , UpperCamelCase: Tuple=0.1 , UpperCamelCase: Any=5_12 , UpperCamelCase: Tuple=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Optional[Any]=3 , UpperCamelCase: int=4 , UpperCamelCase: Tuple=None , ) -> Optional[Any]: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = embedding_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_hidden_groups snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None if self.use_token_type_ids: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple ) -> Tuple: snake_case__ = AlbertModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) snake_case__ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) snake_case__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] ) -> str: snake_case__ = AlbertForPreTraining(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , sentence_order_label=UpperCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Any , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: List[Any] , UpperCamelCase: str , UpperCamelCase: Optional[int] ) -> Dict: snake_case__ = AlbertForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: str , UpperCamelCase: Any , UpperCamelCase: Dict ) -> Optional[Any]: snake_case__ = AlbertForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self: str , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] ) -> int: snake_case__ = self.num_labels snake_case__ = AlbertForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: 
Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[int] , UpperCamelCase: int ) -> str: snake_case__ = self.num_labels snake_case__ = AlbertForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: int ) -> Union[str, Any]: snake_case__ = self.num_choices snake_case__ = AlbertForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , unittest.TestCase ): _UpperCAmelCase = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int]=False ) -> List[Any]: snake_case__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class in get_values(UpperCamelCase ): snake_case__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase ) snake_case__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return inputs_dict def lowerCAmelCase_ ( self: str ) -> Any: snake_case__ = AlbertModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: str ) -> List[str]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) 
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def lowerCAmelCase_ ( self: str ) -> Optional[Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def lowerCAmelCase_ ( self: str ) -> Tuple: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) @slow def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ = AlbertModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case__ = AlbertModel.from_pretrained('albert-base-v2' ) snake_case__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) snake_case__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] snake_case__ = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , UpperCamelCase ) snake_case__ = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "gptsan-japanese" _UpperCAmelCase = [ "past_key_values", ] _UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Optional[Any] , UpperCamelCase: List[str]=3_60_00 , UpperCamelCase: List[str]=12_80 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Any=81_92 , UpperCamelCase: Dict=40_96 , UpperCamelCase: Optional[int]=1_28 , UpperCamelCase: Any=10 , UpperCamelCase: List[Any]=0 , UpperCamelCase: Dict=16 , UpperCamelCase: Tuple=16 , UpperCamelCase: Union[str, Any]=1_28 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: Union[str, Any]=1e-5 , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Dict="float32" , UpperCamelCase: Any=False , UpperCamelCase: Dict=False , UpperCamelCase: List[str]=False , UpperCamelCase: Union[str, Any]=0.002 , UpperCamelCase: int=False , UpperCamelCase: str=True , UpperCamelCase: Dict=3_59_98 , UpperCamelCase: Optional[Any]=3_59_95 , UpperCamelCase: Optional[Any]=3_59_99 , **UpperCamelCase: Optional[int] , ) -> Optional[int]: snake_case__ = vocab_size snake_case__ = max_position_embeddings snake_case__ = d_model snake_case__ = d_ff snake_case__ = d_ext snake_case__ = d_spout snake_case__ = num_switch_layers snake_case__ = num_ext_layers snake_case__ = num_switch_layers + num_ext_layers snake_case__ = num_heads snake_case__ = num_experts snake_case__ = expert_capacity snake_case__ = dropout_rate snake_case__ = layer_norm_epsilon snake_case__ = router_bias snake_case__ = router_jitter_noise snake_case__ = router_dtype snake_case__ = router_ignore_padding_tokens snake_case__ = output_hidden_states snake_case__ = output_attentions snake_case__ = initializer_factor snake_case__ = output_router_logits snake_case__ = use_cache super().__init__( separator_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
from __future__ import annotations

from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num`` found by Pollard's rho, or None."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
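# Hedged usage example (not in the original file): 8051 = 83 * 97 is the classic
# textbook input for Pollard's rho, and the default seed/step find a factor quickly.
factor = pollard_rho(8051)
print(f"pollard_rho(8051) -> {factor}")  # expected 83 or 97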
from math import sqrt

import numpy as np
from sympy import symbols


# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c, validating the physically meaningful range."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along x for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Boost ``event`` (a four-vector, symbolic by default) by ``velocity``."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
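# Hedged sanity check (not in the original file): at half the speed of light,
# gamma = 1 / sqrt(1 - 0.25) ~= 1.1547, the textbook time-dilation factor.
half_c = 0.5 * c
assert abs(gamma(half_c) - 1 / sqrt(0.75)) < 1e-12
print(f"gamma(0.5c) = {gamma(half_c):.4f}")  # ~1.1547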
from __future__ import annotations


def solution(maze: list[list[int]]) -> bool:
    """Print a path through ``maze`` (0 = open, 1 = wall) and return whether one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step from cell (i, j) toward the bottom-right corner."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
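# Hedged usage example (not in the original file): 0 marks an open cell and 1 a wall,
# so this 3x3 grid forces the path down the left edge and across the bottom row.
demo_maze = [
    [0, 1, 0],
    [0, 1, 0],
    [0, 0, 0],
]
assert solution(demo_maze)  # prints the visited-cell grid and returns True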
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "EncodecFeatureExtractor" _UpperCAmelCase = ("T5Tokenizer", "T5TokenizerFast") def __init__( self: Dict , UpperCamelCase: int , UpperCamelCase: str ) -> Optional[int]: super().__init__(UpperCamelCase , UpperCamelCase ) snake_case__ = self.feature_extractor snake_case__ = False def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: List[Any]=None , UpperCamelCase: List[str]=True ) -> List[str]: return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase , language=UpperCamelCase , no_timestamps=UpperCamelCase ) def __call__( self: Any , *UpperCamelCase: str , **UpperCamelCase: int ) -> Union[str, Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*UpperCamelCase , **UpperCamelCase ) snake_case__ = kwargs.pop('audio' , UpperCamelCase ) snake_case__ = kwargs.pop('sampling_rate' , UpperCamelCase ) snake_case__ = kwargs.pop('text' , UpperCamelCase ) if len(UpperCamelCase ) > 0: snake_case__ = args[0] snake_case__ = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if text is not None: snake_case__ = self.tokenizer(UpperCamelCase , **UpperCamelCase ) if audio is not None: snake_case__ = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: snake_case__ = audio_inputs['input_values'] if "padding_mask" in audio_inputs: snake_case__ = audio_inputs['padding_mask'] return inputs def lowerCAmelCase_ ( self: Any , *UpperCamelCase: Any , **UpperCamelCase: List[str] ) -> List[str]: snake_case__ = kwargs.pop('audio' , UpperCamelCase ) snake_case__ = kwargs.pop('padding_mask' , UpperCamelCase ) if len(UpperCamelCase ) > 0: snake_case__ = args[0] snake_case__ = args[1:] if audio_values is not None: return self._decode_audio(UpperCamelCase , padding_mask=UpperCamelCase ) else: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: int , *UpperCamelCase: Any , **UpperCamelCase: Union[str, Any] ) -> Optional[int]: return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: Tuple , UpperCamelCase: Optional = None ) -> List[np.ndarray]: snake_case__ = to_numpy(UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ = audio_values.shape if padding_mask is None: return list(UpperCamelCase ) snake_case__ = to_numpy(UpperCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) snake_case__ = seq_len - padding_mask.shape[-1] snake_case__ = 1 - self.feature_extractor.padding_value snake_case__ = np.pad(UpperCamelCase , ((0, 0), (0, difference)) , 'constant' , constant_values=UpperCamelCase ) snake_case__ = audio_values.tolist() for i in range(UpperCamelCase ): snake_case__ = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] snake_case__ = sliced_audio.reshape(UpperCamelCase , -1 ) return audio_values
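# Hedged toy illustration (not part of the processor above, using made-up sample
# values) of the padding-mask step in `_decode_audio`: the mask is extended with the
# *non*-padding value so freshly generated samples are kept, and only positions that
# were padding in the original input get stripped.
import numpy as np

padding_value = 0.0
audio = np.array([[0.1, 0.2, 0.3, 0.0, 0.4]])  # 5 generated samples; index 3 was padding
mask = np.array([[1, 1, 1, 0]])  # the input mask only covered 4 positions

difference = audio.shape[-1] - mask.shape[-1]
# pad the mask with the NON-padding value so the extra generated sample survives
mask = np.pad(mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
kept = audio[0][mask[0] != padding_value]
print(kept)  # [0.1 0.2 0.3 0.4] -- padded position dropped, new sample kept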
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : int = {"""vocab_file""": """spiece.model"""} __UpperCamelCase : Any = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", } } # TODO(PVP) - this should be removed in Transformers v5 __UpperCamelCase : Tuple = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } __UpperCamelCase : Optional[Any] = """▁""" class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) snake_case__ = legacy snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , ) snake_case__ = vocab_file snake_case__ = extra_ids snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) @staticmethod def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any: if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , ) return max_model_length @property def lowerCAmelCase_ ( self: Tuple ) -> List[str]: return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase )) + [1] return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return list( set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()] def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[int] ) -> List[int]: if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) if token_ids_a is None: return token_ids_a else: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self: Union[str, Any] ) -> List[str]: snake_case__ = self.__dict__.copy() snake_case__ = None return state def __setstate__( self: Optional[int] , UpperCamelCase: int ) -> List[str]: snake_case__ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ = {} snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self: str , UpperCamelCase: "TextInput" , **UpperCamelCase: Dict ) -> List[str]: # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: snake_case__ = SPIECE_UNDERLINE + text.replace(UpperCamelCase , ' ' ) return super().tokenize(UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , **UpperCamelCase: str ) -> str: if not self.legacy: snake_case__ = text.startswith(UpperCamelCase ) if is_first: snake_case__ = text[1:] snake_case__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(UpperCamelCase ): snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] ) -> Dict: if token.startswith('<extra_id_' ): snake_case__ = re.match(R'<extra_id_(\d+)>' , UpperCamelCase ) snake_case__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCamelCase ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> Tuple: if index < self.sp_model.get_piece_size(): snake_case__ = self.sp_model.IdToPiece(UpperCamelCase ) else: snake_case__ = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> Dict: snake_case__ = [] snake_case__ = '' snake_case__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase ) + token snake_case__ = True snake_case__ = [] else: current_sub_tokens.append(UpperCamelCase ) snake_case__ = False out_string += self.sp_model.decode(UpperCamelCase ) return out_string.strip() def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ = os.path.join( UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) 
elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , 'wb' ) as fi: snake_case__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,)
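# A minimal usage sketch (not part of the original module; assumes this class
# corresponds to the concrete transformers T5Tokenizer and that the public
# "t5-small" checkpoint is reachable): build_inputs_with_special_tokens
# appends </s>, and sentinel tokens occupy the top of the vocabulary per
# _convert_token_to_id above.
if __name__ == "__main__":
    from transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ids = tokenizer("translate English to German: hello").input_ids
    assert ids[-1] == tokenizer.eos_token_id
    assert tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1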
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = KandinskyImgaImgPipeline _UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"] _UpperCAmelCase = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] _UpperCAmelCase = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _UpperCAmelCase = False @property def lowerCAmelCase_ ( self: List[str] ) -> int: return 32 @property def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: return 32 @property def lowerCAmelCase_ ( self: Any ) -> Optional[int]: return self.time_input_dim @property def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: return self.time_input_dim * 4 @property def lowerCAmelCase_ ( self: Dict ) -> Tuple: return 1_00 @property def lowerCAmelCase_ ( self: List[str] ) -> Any: snake_case__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def lowerCAmelCase_ ( self: int ) -> Optional[int]: torch.manual_seed(0 ) snake_case__ = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) snake_case__ = MultilingualCLIP(UpperCamelCase ) snake_case__ = text_encoder.eval() return text_encoder @property def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: torch.manual_seed(0 ) snake_case__ = { 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } snake_case__ = UNetaDConditionModel(**UpperCamelCase ) return model @property def lowerCAmelCase_ ( self: Any ) -> int: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase_ ( self: Tuple ) -> List[str]: torch.manual_seed(0 ) snake_case__ = VQModel(**self.dummy_movq_kwargs ) 
return model def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: snake_case__ = self.dummy_text_encoder snake_case__ = self.dummy_tokenizer snake_case__ = self.dummy_unet snake_case__ = self.dummy_movq snake_case__ = { 'num_train_timesteps': 10_00, 'beta_schedule': 'linear', 'beta_start': 0.00_085, 'beta_end': 0.012, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } snake_case__ = DDIMScheduler(**UpperCamelCase ) snake_case__ = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: List[str]=0 ) -> List[Any]: snake_case__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) snake_case__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase ) # create init_image snake_case__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) snake_case__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert('RGB' ).resize((2_56, 2_56) ) if str(UpperCamelCase ).startswith('mps' ): snake_case__ = torch.manual_seed(UpperCamelCase ) else: snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) snake_case__ = { 'prompt': 'horse', 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def lowerCAmelCase_ ( self: int ) -> Optional[Any]: snake_case__ = 'cpu' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**UpperCamelCase ) snake_case__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) snake_case__ = output.images snake_case__ = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] snake_case__ = image[0, -3:, -3:, -1] snake_case__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case__ = np.array( [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_img2img_frog.npy' ) snake_case__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) snake_case__ = 'A red cartoon frog, 4k' snake_case__ = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) snake_case__ = 
KandinskyImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa ) snake_case__ = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = torch.Generator(device='cpu' ).manual_seed(0 ) snake_case__ , snake_case__ = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple() snake_case__ = pipeline( UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , ) snake_case__ = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
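# A small sketch (assumed from the standard diffusers img2img convention, not
# taken from this file) of what the `strength` argument used above controls:
# it selects the fraction of num_inference_steps that actually run after the
# init image is noised to the matching timestep.
def _num_img2img_steps(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start


assert _num_img2img_steps(10, 0.2) == 2  # the dummy inputs above run 2 of 10 steps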
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __SCREAMING_SNAKE_CASE: def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: List[str] ) -> Dict: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None if self.use_token_type_ids: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: 
List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict: snake_case__ = LlamaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) snake_case__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str: snake_case__ = True snake_case__ = LlamaModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , ) snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any: snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]: snake_case__ = True snake_case__ = True snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # first forward pass snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , ) snake_case__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] # select random slice snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ = output_from_past[:, :, 
random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) ) def lowerCAmelCase_ ( self: int ) -> Dict: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ): _UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else () _UpperCAmelCase = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = LlamaModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'single_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'multi_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def lowerCAmelCase_ ( self: Dict ) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = ids_tensor([1, 10] , config.vocab_size ) snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = LlamaModel(UpperCamelCase ) original_model.to(UpperCamelCase ) original_model.eval() snake_case__ = original_model(UpperCamelCase ).last_hidden_state snake_case__ = original_model(UpperCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = {'type': scaling_type, 'factor': 10.0} snake_case__ = LlamaModel(UpperCamelCase ) scaled_model.to(UpperCamelCase ) scaled_model.eval() snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: int ) -> List[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) snake_case__ = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # fmt: off snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]: snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' snake_case__ = 'Simply put, the theory of relativity states that ' snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' ) snake_case__ = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase ) # greedy generation outputs snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase ) snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase )
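# A short sketch (hypothetical values) of the rope_scaling configuration the
# parameterized scaling test above sweeps: a dict with a "type" of "linear"
# or "dynamic" and a float "factor" > 1 that stretches the usable context.
if __name__ == "__main__":
    config = LlamaConfig(max_position_embeddings=2048, rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}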
def longest_distance(graph) -> None:
    """Print the number of vertices on the longest path in a DAG, found with
    Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of the graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
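# A cross-check sketch (not part of the original module): the same
# longest-path length can be computed by memoized DFS over the DAG; for the
# graph above both methods count 5 vertices (e.g. 1 -> 2 -> 5 -> 6 -> 7).
def longest_distance_dfs(dag) -> int:
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def depth(u):
        return 1 + max((depth(v) for v in dag[u]), default=0)

    return max(depth(u) for u in dag)


assert longest_distance_dfs(graph) == 5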
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Given a point of reflection on the ellipse 4x^2 + y^2 = 100 and the
    gradient of the incoming beam, return the next reflection point and the
    gradient of the outgoing beam."""
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count how many times the beam reflects inside the ellipse before it
    escapes through the gap at the top (-0.01 <= x <= 0.01, y > 0)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
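# A quick self-check sketch (not part of the original solution): every point
# next_point returns must stay on the ellipse 4x^2 + y^2 = 100; the published
# answer to Project Euler problem 144 is 354 reflections.
if __name__ == "__main__":
    x, y, _ = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
    assert isclose(4 * x * x + y * y, 100.0, abs_tol=1e-6)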
def _print_dist(dist, v) -> None:
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    #
    # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
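# A non-interactive sketch of the routine above, mirroring the commented
# example session: edge 1 -> 2 with weight 2 and edge 2 -> 1 with weight 1 on
# a 3-vertex graph, no input() prompts needed.
def _example_run() -> None:
    inf = float("inf")
    g = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    dist, _ = floyd_warshall(g, 3)
    assert dist[1][2] == 2.0 and dist[2][1] == 1.0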
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __SCREAMING_SNAKE_CASE( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self: Any , UpperCamelCase: Optional[int]=None , **UpperCamelCase: Union[str, Any] ) -> int: super().__init__(features=UpperCamelCase ) snake_case__ = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCAmelCase_ ( self: Any , UpperCamelCase: Any ) -> List[str]: import torch if isinstance(UpperCamelCase , UpperCamelCase ) and column: if all( isinstance(UpperCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(UpperCamelCase ) return column def lowerCAmelCase_ ( self: str , UpperCamelCase: Dict ) -> Union[str, Any]: import torch if isinstance(UpperCamelCase , (str, bytes, type(UpperCamelCase )) ): return value elif isinstance(UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() snake_case__ = {} if isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): snake_case__ = {'dtype': torch.intaa} elif isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): snake_case__ = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCamelCase , PIL.Image.Image ): snake_case__ = np.asarray(UpperCamelCase ) return torch.tensor(UpperCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: str ) -> Any: import torch # support for torch, tf, jax etc. 
if hasattr(UpperCamelCase , '__array__' ) and not isinstance(UpperCamelCase , torch.Tensor ): snake_case__ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCamelCase , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] ) elif isinstance(UpperCamelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] ) return self._tensorize(UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: dict ) -> List[str]: return map_nested(self._recursive_tensorize , UpperCamelCase , map_list=UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: pa.Table ) -> Mapping: snake_case__ = self.numpy_arrow_extractor().extract_row(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_row(UpperCamelCase ) return self.recursive_tensorize(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: pa.Table ) -> "torch.Tensor": snake_case__ = self.numpy_arrow_extractor().extract_column(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_column(UpperCamelCase , pa_table.column_names[0] ) snake_case__ = self.recursive_tensorize(UpperCamelCase ) snake_case__ = self._consolidate(UpperCamelCase ) return column def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: pa.Table ) -> Mapping: snake_case__ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_batch(UpperCamelCase ) snake_case__ = self.recursive_tensorize(UpperCamelCase ) for column_name in batch: snake_case__ = self._consolidate(batch[column_name] ) return batch
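# A minimal usage sketch (not part of the original module; assumes `datasets`
# and `torch` are installed): this formatter is normally selected with
# with_format("torch") rather than instantiated directly, and the int64 /
# float32 defaults come from _tensorize above.
if __name__ == "__main__":
    import torch
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
    assert ds[0]["x"].dtype == torch.float32
    assert ds["x"].shape == (2, 2)  # equal-shape rows are stacked by _consolidate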
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[int] = logging.get_logger(__name__) __UpperCamelCase : str = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "unispeech" def __init__( self: Any , UpperCamelCase: int=32 , UpperCamelCase: Tuple=7_68 , UpperCamelCase: List[str]=12 , UpperCamelCase: Union[str, Any]=12 , UpperCamelCase: str=30_72 , UpperCamelCase: Union[str, Any]="gelu" , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Dict=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: str=0.1 , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Dict=0.02 , UpperCamelCase: Optional[int]=1e-5 , UpperCamelCase: Optional[int]="group" , UpperCamelCase: List[Any]="gelu" , UpperCamelCase: List[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase: str=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase: List[str]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase: Tuple=False , UpperCamelCase: Dict=1_28 , UpperCamelCase: Optional[int]=16 , UpperCamelCase: Tuple=False , UpperCamelCase: List[Any]=True , UpperCamelCase: str=0.05 , UpperCamelCase: Optional[int]=10 , UpperCamelCase: Union[str, Any]=2 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: List[Any]=10 , UpperCamelCase: Union[str, Any]=0 , UpperCamelCase: List[str]=3_20 , UpperCamelCase: Union[str, Any]=2 , UpperCamelCase: int=0.1 , UpperCamelCase: int=1_00 , UpperCamelCase: Any=2_56 , UpperCamelCase: str=2_56 , UpperCamelCase: List[str]=0.1 , UpperCamelCase: List[Any]="mean" , UpperCamelCase: Optional[Any]=False , UpperCamelCase: str=False , UpperCamelCase: Dict=2_56 , UpperCamelCase: List[Any]=80 , UpperCamelCase: Union[str, Any]=0 , UpperCamelCase: Any=1 , UpperCamelCase: Any=2 , UpperCamelCase: int=0.5 , **UpperCamelCase: Union[str, Any] , ) -> Tuple: super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase ) snake_case__ = hidden_size snake_case__ = feat_extract_norm snake_case__ = feat_extract_activation snake_case__ = list(UpperCamelCase ) snake_case__ = list(UpperCamelCase ) snake_case__ = list(UpperCamelCase ) snake_case__ = conv_bias snake_case__ = num_conv_pos_embeddings snake_case__ = num_conv_pos_embedding_groups snake_case__ = len(self.conv_dim ) snake_case__ = num_hidden_layers snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = num_attention_heads snake_case__ = hidden_dropout snake_case__ = attention_dropout snake_case__ = activation_dropout snake_case__ = feat_proj_dropout snake_case__ = final_dropout snake_case__ = layerdrop snake_case__ = layer_norm_eps snake_case__ = initializer_range snake_case__ = num_ctc_classes snake_case__ = vocab_size snake_case__ = do_stable_layer_norm snake_case__ = use_weighted_layer_sum snake_case__ = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case__ = apply_spec_augment snake_case__ = mask_time_prob snake_case__ = mask_time_length snake_case__ = mask_time_min_masks snake_case__ = mask_feature_prob snake_case__ = mask_feature_length snake_case__ = mask_feature_min_masks # parameters for pretraining with codevector quantized representations snake_case__ = num_codevectors_per_group snake_case__ = num_codevector_groups snake_case__ = contrastive_logits_temperature snake_case__ = feat_quantizer_dropout snake_case__ = num_negatives snake_case__ = codevector_dim snake_case__ = proj_codevector_dim snake_case__ = diversity_loss_weight # ctc loss snake_case__ = ctc_loss_reduction snake_case__ = ctc_zero_infinity # pretraining loss snake_case__ = replace_prob @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
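# A minimal sketch (assuming this class is the concrete
# transformers.UniSpeechConfig): the final property above computes the feature
# extractor's total downsampling, reduce(mul, (5, 2, 2, 2, 2, 2, 2)) = 320
# raw-audio samples per encoder frame.
if __name__ == "__main__":
    from transformers import UniSpeechConfig

    assert UniSpeechConfig().inputs_to_logits_ratio == 320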
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Perform circular convolution of two discrete signals using the
    circulant-matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated one step further
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
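# An independent cross-check sketch (not part of the original class): circular
# convolution is pointwise multiplication in the DFT domain, so numpy's FFT
# must reproduce the matrix result [10.0, 10.0, 6.0, 14.0] for the default
# signals above.
def _fft_circular_convolution(a, b) -> list[float]:
    n = max(len(a), len(b))
    fa = np.fft.fft(a, n)
    fb = np.fft.fft(b, n)
    return np.real(np.fft.ifft(fa * fb)).round(2).tolist()


if __name__ == "__main__":
    assert _fft_circular_convolution([2, 1, 2, -1], [1, 2, 3, 4]) == [10.0, 10.0, 6.0, 14.0]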
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
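# A tiny sketch of the degree rule check_circuit_or_path encodes (Euler's
# theorem for connected graphs): zero odd-degree vertices means a circuit,
# exactly two means a path. g1 above has odd degree at vertices 1 and 5, so
# it admits an Euler path starting from an odd vertex.
if __name__ == "__main__":
    assert check_circuit_or_path({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}, 10) == (2, 5)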
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a_ ( _A , _A=0.999 , _A="cosine" , ) -> Optional[int]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_A ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) snake_case__ = [] for i in range(_A ): snake_case__ = i / num_diffusion_timesteps snake_case__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) ) return torch.tensor(_A , dtype=torch.floataa ) class __SCREAMING_SNAKE_CASE( a_ , a_ ): _UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase = 2 @register_to_config def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str: if trained_betas is not None: snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. snake_case__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' ) elif beta_schedule == "exp": snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) snake_case__ = 1.0 - self.betas snake_case__ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = use_karras_sigmas def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str: if schedule_timesteps is None: snake_case__ = self.timesteps snake_case__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0 else: snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep snake_case__ = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: snake_case__ = self.index_for_timestep(UpperCamelCase ) snake_case__ = self.sigmas[step_index] snake_case__ = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str: snake_case__ = num_inference_steps snake_case__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": snake_case__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": snake_case__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) snake_case__ = np.log(UpperCamelCase ) snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase ) if self.config.use_karras_sigmas: snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps ) snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] ) snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase ) snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) snake_case__ = torch.from_numpy(UpperCamelCase ) snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(UpperCamelCase ).startswith('mps' ): # mps does not support float64 snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa ) else: snake_case__ = timesteps.to(device=UpperCamelCase ) # empty dt and derivative snake_case__ = None snake_case__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter snake_case__ = defaultdict(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple: # get log sigma snake_case__ = np.log(UpperCamelCase ) # get distribution snake_case__ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) snake_case__ = low_idx + 1 snake_case__ = log_sigmas[low_idx] snake_case__ = log_sigmas[high_idx] # interpolate sigmas snake_case__ = (low - log_sigma) / (low - high) snake_case__ = np.clip(UpperCamelCase , 0 , 1 ) # transform interpolation to time range snake_case__ = (1 - w) * low_idx + w * high_idx snake_case__ = t.reshape(sigma.shape ) return t def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor: snake_case__ = in_sigmas[-1].item() snake_case__ = in_sigmas[0].item() snake_case__ = 7.0 # 7.0 is the value used in the paper snake_case__ = np.linspace(0 , 1 , UpperCamelCase ) snake_case__ = sigma_min ** (1 / rho) snake_case__ = sigma_max ** (1 / rho) snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: return self.dt is None def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]: snake_case__ = self.index_for_timestep(UpperCamelCase ) # advance index counter by 1 snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: snake_case__ = self.sigmas[step_index] snake_case__ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method snake_case__ = self.sigmas[step_index - 1] snake_case__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API snake_case__ = 0 snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": snake_case__ = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: snake_case__ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order snake_case__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep snake_case__ = sigma_next - sigma_hat # store for 2nd order step snake_case__ = derivative snake_case__ = dt snake_case__ = sample else: # 2. 2nd order / Heun's method snake_case__ = (sample - pred_original_sample) / sigma_next snake_case__ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample snake_case__ = self.dt snake_case__ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" snake_case__ = None snake_case__ = None snake_case__ = None snake_case__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ): # mps does not support float64 snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: snake_case__ = self.timesteps.to(original_samples.device ) snake_case__ = timesteps.to(original_samples.device ) snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps] snake_case__ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): snake_case__ = sigma.unsqueeze(-1 ) snake_case__ = original_samples + noise * sigma return noisy_samples def __len__( self: List[Any] ) -> Union[str, Any]: return self.config.num_train_timesteps
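# A standalone numeric sketch (illustrative sigma_min/sigma_max values) of the
# Karras et al. (2022) schedule implemented in _convert_to_karras above:
# interpolate linearly in sigma**(1/rho) space with rho = 7, so steps cluster
# near the low-noise end of the trajectory.
if __name__ == "__main__":
    sigma_min, sigma_max, rho = 0.03, 14.6, 7.0
    ramp = np.linspace(0, 1, 10)
    min_inv_rho, max_inv_rho = sigma_min ** (1 / rho), sigma_max ** (1 / rho)
    karras_sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    assert np.isclose(karras_sigmas[0], sigma_max) and np.isclose(karras_sigmas[-1], sigma_min)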
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "deta" _UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: Any , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: str=9_00 , UpperCamelCase: List[str]=20_48 , UpperCamelCase: List[Any]=6 , UpperCamelCase: str=20_48 , UpperCamelCase: str=8 , UpperCamelCase: Optional[int]=6 , UpperCamelCase: str=10_24 , UpperCamelCase: Dict=8 , UpperCamelCase: Dict=0.0 , UpperCamelCase: Tuple=True , UpperCamelCase: Union[str, Any]="relu" , UpperCamelCase: int=2_56 , UpperCamelCase: Any=0.1 , UpperCamelCase: Union[str, Any]=0.0 , UpperCamelCase: Optional[Any]=0.0 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: Dict=1.0 , UpperCamelCase: Tuple=True , UpperCamelCase: Any=False , UpperCamelCase: Any="sine" , UpperCamelCase: Any=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: Any=4 , UpperCamelCase: Any=True , UpperCamelCase: Optional[Any]=3_00 , UpperCamelCase: str=True , UpperCamelCase: int=True , UpperCamelCase: Any=1 , UpperCamelCase: Dict=5 , UpperCamelCase: Tuple=2 , UpperCamelCase: Dict=1 , UpperCamelCase: Union[str, Any]=1 , UpperCamelCase: str=5 , UpperCamelCase: int=2 , UpperCamelCase: int=0.1 , UpperCamelCase: Any=0.25 , **UpperCamelCase: str , ) -> Any: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) snake_case__ = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] ) else: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = backbone_config.pop('model_type' ) snake_case__ = CONFIG_MAPPING[backbone_model_type] snake_case__ = config_class.from_dict(UpperCamelCase ) snake_case__ = backbone_config snake_case__ = num_queries snake_case__ = max_position_embeddings snake_case__ = d_model snake_case__ = encoder_ffn_dim snake_case__ = encoder_layers snake_case__ = encoder_attention_heads snake_case__ = decoder_ffn_dim snake_case__ = decoder_layers snake_case__ = decoder_attention_heads snake_case__ = dropout snake_case__ = attention_dropout snake_case__ = activation_dropout snake_case__ = activation_function snake_case__ = init_std snake_case__ = init_xavier_std snake_case__ = encoder_layerdrop snake_case__ = auxiliary_loss snake_case__ = position_embedding_type # deformable attributes snake_case__ = num_feature_levels snake_case__ = encoder_n_points snake_case__ = decoder_n_points snake_case__ = two_stage snake_case__ = two_stage_num_proposals snake_case__ = with_box_refine snake_case__ = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' 
) # Hungarian matcher snake_case__ = class_cost snake_case__ = bbox_cost snake_case__ = giou_cost # Loss coefficients snake_case__ = mask_loss_coefficient snake_case__ = dice_loss_coefficient snake_case__ = bbox_loss_coefficient snake_case__ = giou_loss_coefficient snake_case__ = eos_coefficient snake_case__ = focal_alpha super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def lowerCAmelCase_ ( self: Tuple ) -> int: return self.encoder_attention_heads @property def lowerCAmelCase_ ( self: Tuple ) -> int: return self.d_model def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: snake_case__ = copy.deepcopy(self.__dict__ ) snake_case__ = self.backbone_config.to_dict() snake_case__ = self.__class__.model_type return output
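A short usage sketch for the configuration above, assuming it is exported as `DetaConfig` (per the `model_type = "deta"` registration); every unset argument falls back to the `__init__` defaults, and `backbone_config=None` triggers the default ResNet backbone:

# A minimal sketch; the values in the comments are assumptions read off
# the __init__ signature above, not verified outputs.
from transformers import DetaConfig

config = DetaConfig(num_queries=300, encoder_layers=6, decoder_layers=6)
print(config.d_model)                     # 256 by default
print(config.hidden_size)                 # same value, aliased via attribute_map
print(config.backbone_config.model_type)  # "resnet" (stages 2-4 as feature maps)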
307
from typing import TYPE_CHECKING from ..utils import _LazyModule __UpperCamelCase : Tuple = { """config""": [ """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""", """OnnxConfig""", """OnnxConfigWithPast""", """OnnxSeq2SeqConfigWithPast""", """PatchingSpec""", ], """convert""": ["""export""", """validate_model_outputs"""], """features""": ["""FeaturesManager"""], """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
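The `_LazyModule` wiring above keeps `import transformers.onnx` cheap: nothing listed in `_import_structure` is imported until the attribute is first accessed. A minimal standalone sketch of that mechanism using a module-level `__getattr__` (PEP 562); the relative import assumes the file lives inside a package, and the real implementation is transformers' `_LazyModule`, so this only illustrates the idea:

import importlib

_import_structure = {"config": ["OnnxConfig"], "convert": ["export"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module = _attr_to_module.get(name)
    if module is None:
        raise AttributeError(name)
    # Import the submodule only now, on first access.
    return getattr(importlib.import_module(f".{module}", __name__), name)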
307
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "gptsan-japanese" _UpperCAmelCase = [ "past_key_values", ] _UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Optional[Any] , UpperCamelCase: List[str]=3_60_00 , UpperCamelCase: List[str]=12_80 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Any=81_92 , UpperCamelCase: Dict=40_96 , UpperCamelCase: Optional[int]=1_28 , UpperCamelCase: Any=10 , UpperCamelCase: List[Any]=0 , UpperCamelCase: Dict=16 , UpperCamelCase: Tuple=16 , UpperCamelCase: Union[str, Any]=1_28 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: Union[str, Any]=1e-5 , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Dict="float32" , UpperCamelCase: Any=False , UpperCamelCase: Dict=False , UpperCamelCase: List[str]=False , UpperCamelCase: Union[str, Any]=0.002 , UpperCamelCase: int=False , UpperCamelCase: str=True , UpperCamelCase: Dict=3_59_98 , UpperCamelCase: Optional[Any]=3_59_95 , UpperCamelCase: Optional[Any]=3_59_99 , **UpperCamelCase: Optional[int] , ) -> Optional[int]: snake_case__ = vocab_size snake_case__ = max_position_embeddings snake_case__ = d_model snake_case__ = d_ff snake_case__ = d_ext snake_case__ = d_spout snake_case__ = num_switch_layers snake_case__ = num_ext_layers snake_case__ = num_switch_layers + num_ext_layers snake_case__ = num_heads snake_case__ = num_experts snake_case__ = expert_capacity snake_case__ = dropout_rate snake_case__ = layer_norm_epsilon snake_case__ = router_bias snake_case__ = router_jitter_noise snake_case__ = router_dtype snake_case__ = router_ignore_padding_tokens snake_case__ = output_hidden_states snake_case__ = output_attentions snake_case__ = initializer_factor snake_case__ = output_router_logits snake_case__ = use_cache super().__init__( separator_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
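The `attribute_map` above lets callers read the conventional names (`hidden_size`, `num_attention_heads`, `num_hidden_layers`) even though the config stores `d_model`, `num_heads`, and `num_layers`. A standalone sketch of that aliasing idea (the real `PretrainedConfig` also redirects writes):

# __getattr__ only fires when normal lookup fails, so stored attributes
# are untouched and aliased reads are redirected through attribute_map.
class AliasedConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads"}

    def __init__(self, d_model=1024, num_heads=16):
        self.d_model = d_model
        self.num_heads = num_heads

    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

config = AliasedConfig()
assert config.hidden_size == config.d_model == 1024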
307
def a_ ( _A , _A ) -> int: """simple docstring""" return 1 if input_a == input_a else 0 def a_ ( ) -> None: """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
307
1
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a_ ( ) -> Optional[int]: """simple docstring""" snake_case__ = ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=_A , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=_A , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=_A ) return parser.parse_args() def a_ ( ) -> str: """simple docstring""" snake_case__ = parse_args() # Import training_script as a module. snake_case__ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) snake_case__ = script_fpath.stem snake_case__ = importlib.import_module(_A ) # Patch sys.argv snake_case__ = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
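The launcher above works by importing the training script as a module, rewriting `sys.argv`, and handing `mod._mp_fn` to `xmp.spawn`. A self-contained sketch of the import-and-patch part, with `xmp.spawn` replaced by a direct call so it runs without torch_xla:

import importlib
import sys
import tempfile
from pathlib import Path

# Write a throwaway "training script" so the sketch is self-contained.
tmp = Path(tempfile.mkdtemp())
(tmp / "toy_script.py").write_text(
    "def _mp_fn(index):\n    import sys\n    print(index, sys.argv[1:])\n"
)

sys.path.append(str(tmp))
mod = importlib.import_module("toy_script")
sys.argv = ["toy_script.py", "--tpu_num_cores", "1"]
mod._mp_fn(0)  # xmp.spawn would invoke this once per TPU core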
307
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""") __UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def a_ ( ) -> int: """simple docstring""" with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def a_ ( ) -> List[str]: """simple docstring""" snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a_ ( ) -> Dict: """simple docstring""" snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() snake_case__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def a_ ( ) -> Optional[int]: """simple docstring""" assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() def a_ ( ) -> Optional[Any]: """simple docstring""" # laplace diagonals snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) snake_case__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def a_ ( ) -> Dict: """simple docstring""" assert med.median_filter(_A , 3 ).any() def a_ ( ) -> Dict: """simple docstring""" snake_case__ , snake_case__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = sp.make_sepia(_A , 20 ) assert sepia.all() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]: """simple docstring""" snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]: """simple docstring""" snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def a_ ( ) -> Any: """simple docstring""" snake_case__ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
snake_case__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None snake_case__ = 0 snake_case__ = 0 snake_case__ = image[x_coordinate][y_coordinate] snake_case__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image snake_case__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): snake_case__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
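The local-binary-pattern assertions above reduce to a simple per-pixel rule: threshold the eight neighbours against the centre pixel and read the resulting bits as an integer. A standalone sketch for one interior pixel (the library's helpers may order the neighbours or weight the bits differently):

import numpy as np

def lbp_value(image, x, y):
    # Each neighbour >= centre contributes one bit, read clockwise
    # from the top-left corner.
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    bits = [1 if image[x + dx, y + dy] >= center else 0 for dx, dy in offsets]
    return sum(bit << i for i, bit in enumerate(bits))

img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
print(lbp_value(img, 1, 1))  # 120: neighbours 60, 90, 80, 70 exceed the centre 50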
307
1
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = OpenAIGPTTokenizer _UpperCAmelCase = OpenAIGPTTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = False def lowerCAmelCase_ ( self: Any ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] snake_case__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) snake_case__ = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', ''] snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(UpperCamelCase ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(UpperCamelCase ) ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: int ) -> Any: return "lower newer", "lower newer" def lowerCAmelCase_ ( self: Optional[Any] ) -> List[str]: snake_case__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) snake_case__ = 'lower' snake_case__ = ['low', 'er</w>'] snake_case__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) snake_case__ = tokens + ['<unk>'] snake_case__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int]=15 ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) # Simple input snake_case__ = 'This is a simple input' snake_case__ = ['This is a simple input 1', 'This is a simple input 2'] snake_case__ = ('This is a simple input', 'This is a pair') snake_case__ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' ) # Simple input self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' ) # Simple input self.assertRaises( UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , ) # Pair input self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' ) # Pair input self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' ) # Pair input self.assertRaises( UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , ) def lowerCAmelCase_ ( self: int ) -> str: pass @require_ftfy @require_spacy @require_tokenizers class __SCREAMING_SNAKE_CASE( a_ ): pass
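The unit test above expects 'lower' to tokenize as ['low', 'er</w>']; that follows mechanically from the merge list ['l o', 'lo w', 'e r</w>']. A standalone sketch applying those merges by hand:

# Apply the toy merges in priority order to "lower" (</w> marks end-of-word).
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
word = ["l", "o", "w", "e", "r</w>"]
for a, b in merges:
    i = 0
    while i < len(word) - 1:
        if word[i] == a and word[i + 1] == b:
            word[i : i + 2] = [a + b]
        else:
            i += 1
print(word)  # ['low', 'er</w>'], ids 14 and 15 in the vocab above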
307
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
307
1
from __future__ import annotations import math import random from typing import Any class __SCREAMING_SNAKE_CASE: def __init__( self: Tuple ) -> None: snake_case__ = [] snake_case__ = 0 snake_case__ = 0 def lowerCAmelCase_ ( self: int ) -> bool: return self.head == self.tail def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Any ) -> None: self.data.append(UpperCamelCase ) snake_case__ = self.tail + 1 def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: snake_case__ = self.data[self.head] snake_case__ = self.head + 1 return ret def lowerCAmelCase_ ( self: Tuple ) -> int: return self.tail - self.head def lowerCAmelCase_ ( self: Dict ) -> None: print(self.data ) print('**************' ) print(self.data[self.head : self.tail] ) class __SCREAMING_SNAKE_CASE: def __init__( self: Dict , UpperCamelCase: Any ) -> None: snake_case__ = data snake_case__ = None snake_case__ = None snake_case__ = 1 def lowerCAmelCase_ ( self: str ) -> Any: return self.data def lowerCAmelCase_ ( self: List[Any] ) -> MyNode | None: return self.left def lowerCAmelCase_ ( self: Optional[int] ) -> MyNode | None: return self.right def lowerCAmelCase_ ( self: Optional[int] ) -> int: return self.height def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Any ) -> None: snake_case__ = data def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: MyNode | None ) -> None: snake_case__ = node def lowerCAmelCase_ ( self: str , UpperCamelCase: MyNode | None ) -> None: snake_case__ = node def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: int ) -> None: snake_case__ = height def a_ ( _A ) -> int: """simple docstring""" if node is None: return 0 return node.get_height() def a_ ( _A , _A ) -> int: """simple docstring""" if a > b: return a return b def a_ ( _A ) -> MyNode: """simple docstring""" print('left rotation node:' , node.get_data() ) snake_case__ = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(_A ) snake_case__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_A ) snake_case__ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_A ) return ret def a_ ( _A ) -> MyNode: """simple docstring""" print('right rotation node:' , node.get_data() ) snake_case__ = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(_A ) snake_case__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_A ) snake_case__ = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_A ) return ret def a_ ( _A ) -> MyNode: """simple docstring""" snake_case__ = node.get_left() assert left_child is not None node.set_left(left_rotation(_A ) ) return right_rotation(_A ) def a_ ( _A ) -> MyNode: """simple docstring""" snake_case__ = node.get_right() assert right_child is not None node.set_right(right_rotation(_A ) ) return left_rotation(_A ) def a_ ( _A , _A ) -> MyNode | None: """simple docstring""" if node is None: return MyNode(_A ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , _A ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected snake_case__ = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child snake_case__ = right_rotation(_A ) else: snake_case__ = lr_rotation(_A ) else: node.set_right(insert_node(node.get_right() , _A ) ) if get_height(node.get_right() ) - 
get_height(node.get_left() ) == 2: snake_case__ = node.get_right() assert right_child is not None if data < right_child.get_data(): snake_case__ = rl_rotation(_A ) else: snake_case__ = left_rotation(_A ) snake_case__ = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_A ) return node def a_ ( _A ) -> Any: """simple docstring""" while True: snake_case__ = root.get_right() if right_child is None: break snake_case__ = right_child return root.get_data() def a_ ( _A ) -> Any: """simple docstring""" while True: snake_case__ = root.get_left() if left_child is None: break snake_case__ = left_child return root.get_data() def a_ ( _A , _A ) -> MyNode | None: """simple docstring""" snake_case__ = root.get_left() snake_case__ = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: snake_case__ = get_left_most(_A ) root.set_data(_A ) root.set_right(del_node(_A , _A ) ) elif left_child is not None: snake_case__ = left_child elif right_child is not None: snake_case__ = right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(_A , _A ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_A , _A ) ) if get_height(_A ) - get_height(_A ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): snake_case__ = left_rotation(_A ) else: snake_case__ = rl_rotation(_A ) elif get_height(_A ) - get_height(_A ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): snake_case__ = right_rotation(_A ) else: snake_case__ = lr_rotation(_A ) snake_case__ = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(_A ) return root class __SCREAMING_SNAKE_CASE: def __init__( self: Union[str, Any] ) -> None: snake_case__ = None def lowerCAmelCase_ ( self: Any ) -> int: return get_height(self.root ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Any ) -> None: print('insert:' + str(UpperCamelCase ) ) snake_case__ = insert_node(self.root , UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Any ) -> None: print('delete:' + str(UpperCamelCase ) ) if self.root is None: print('Tree is empty!' 
) return snake_case__ = del_node(self.root , UpperCamelCase ) def __str__( self: Optional[Any] , ) -> str: # a level traversal gives a more intuitive view of the tree snake_case__ = '' snake_case__ = MyQueue() q.push(self.root ) snake_case__ = self.get_height() if layer == 0: return output snake_case__ = 0 while not q.is_empty(): snake_case__ = q.pop() snake_case__ = ' ' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(UpperCamelCase ) q.push(UpperCamelCase ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space snake_case__ = cnt + 1 for i in range(1_00 ): if cnt == math.pow(2 , UpperCamelCase ) - 1: snake_case__ = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def a_ ( ) -> None: """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() __UpperCamelCase : Tuple = AVLtree() __UpperCamelCase : str = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
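Every rotation above exists to restore a single invariant: the heights of any node's two subtrees differ by at most one. A standalone validator of that invariant over tuple-encoded trees, independent of the MyNode class above:

# Trees as (data, left, right) tuples; None is the empty tree.
def height(node):
    return 0 if node is None else 1 + max(height(node[1]), height(node[2]))

def is_avl(node):
    if node is None:
        return True
    balanced = abs(height(node[1]) - height(node[2])) <= 1
    return balanced and is_avl(node[1]) and is_avl(node[2])

assert is_avl((2, (1, None, None), (3, None, None)))      # balanced
assert not is_avl((3, (2, (1, None, None), None), None))  # left-left chain, needs rotation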
307
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
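The `shortest_edge` resize above scales the image so its smaller side hits the target while preserving the aspect ratio, and the centre crop then cuts a fixed window. The arithmetic in isolation (the actual `get_resize_output_image_size` may round slightly differently):

def shortest_edge_size(height, width, target):
    # Scale so min(height, width) == target, keeping the aspect ratio.
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)

print(shortest_edge_size(480, 640, 256))  # (256, 341)

# Centre-crop origin for a 224x224 window out of 256x341.
h, w, ch, cw = 256, 341, 224, 224
print((h - ch) // 2, (w - cw) // 2)  # 16 58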
307
1
def a_ ( _A ) -> str: """simple docstring""" return "".join([hex(_A )[2:].zfill(2 ).upper() for byte in list(_A )] ) def a_ ( _A ) -> bytes: """simple docstring""" # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(_A ) % 2) != 0: raise ValueError( 'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_A ) <= set('0123456789ABCDEF' ): raise ValueError( 'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_A ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
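A round-trip usage sketch for the two helpers above, given hypothetical readable names since the originals are anonymised (the decoder's validation of length and character set is omitted here):

def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in data])

def base16_decode(data: str) -> bytes:
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))

encoded = base16_encode(b"Hello")
print(encoded)                        # 48656C6C6F
assert base16_decode(encoded) == b"Hello"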
307
import random from typing import Any def a_ ( _A ) -> list[Any]: """simple docstring""" for _ in range(len(_A ) ): snake_case__ = random.randint(0 , len(_A ) - 1 ) snake_case__ = random.randint(0 , len(_A ) - 1 ) snake_case__ , snake_case__ = data[b], data[a] return data if __name__ == "__main__": __UpperCamelCase : Dict = [0, 1, 2, 3, 4, 5, 6, 7] __UpperCamelCase : Any = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
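Note that the loop above swaps two independently random indices per iteration, which is not the classic Fisher-Yates walk and in general does not sample permutations uniformly. The textbook version iterates from the last index down and swaps with a uniform index at or before it:

import random

def fisher_yates(data):
    # After processing index i, positions i..n-1 hold a uniformly random
    # suffix; the final result is a uniform random permutation.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates([0, 1, 2, 3, 4, 5, 6, 7]))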
307
1
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def a_ ( ) -> List[str]: """simple docstring""" snake_case__ = HfArgumentParser(_A ) snake_case__ = parser.parse_args_into_dataclasses()[0] snake_case__ = TensorFlowBenchmark(args=_A ) try: snake_case__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: snake_case__ = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' snake_case__ = ' '.join(str(_A ).split(' ' )[:-1] ) snake_case__ = '' snake_case__ = eval(str(_A ).split(' ' )[-1] ) snake_case__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_A ) if len(_A ) > 0: snake_case__ = full_error_msg + begin_error_msg + str(_A ) raise ValueError(_A ) benchmark.run() if __name__ == "__main__": main()
307
class __SCREAMING_SNAKE_CASE( a_ ): pass class __SCREAMING_SNAKE_CASE( a_ ): pass class __SCREAMING_SNAKE_CASE: def __init__( self: List[str] ) -> Union[str, Any]: snake_case__ = [ [], [], [], ] def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: int ) -> None: try: if len(self.queues[priority] ) >= 1_00: raise OverflowError('Maximum queue size is 100' ) self.queues[priority].append(UpperCamelCase ) except IndexError: raise ValueError('Valid priorities are 0, 1, and 2' ) def lowerCAmelCase_ ( self: List[Any] ) -> int: for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError('All queues are empty' ) def __str__( self: Union[str, Any] ) -> str: return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) ) class __SCREAMING_SNAKE_CASE: def __init__( self: Union[str, Any] ) -> Any: snake_case__ = [] def lowerCAmelCase_ ( self: str , UpperCamelCase: int ) -> None: if len(self.queue ) == 1_00: raise OverFlowError('Maximum queue size is 100' ) self.queue.append(UpperCamelCase ) def lowerCAmelCase_ ( self: int ) -> int: if not self.queue: raise UnderFlowError('The queue is empty' ) else: snake_case__ = min(self.queue ) self.queue.remove(UpperCamelCase ) return data def __str__( self: Optional[Any] ) -> str: return str(self.queue ) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(_A ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(_A ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(_A ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(_A ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
307
1
from __future__ import annotations def a_ ( _A , _A ) -> bool: """simple docstring""" snake_case__ = get_failure_array(_A ) # 2) Step through text searching for pattern snake_case__ , snake_case__ = 0, 0 # index into text, pattern while i < len(_A ): if pattern[j] == text[i]: if j == (len(_A ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: snake_case__ = failure[j - 1] continue i += 1 return False def a_ ( _A ) -> list[int]: """simple docstring""" snake_case__ = [0] snake_case__ = 0 snake_case__ = 1 while j < len(_A ): if pattern[i] == pattern[j]: i += 1 elif i > 0: snake_case__ = failure[i - 1] continue j += 1 failure.append(_A ) return failure if __name__ == "__main__": # Test 1) __UpperCamelCase : int = """abc1abc12""" __UpperCamelCase : List[str] = """alskfjaldsabc1abc1abc12k23adsfabcabc""" __UpperCamelCase : Optional[Any] = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __UpperCamelCase : Optional[Any] = """ABABX""" __UpperCamelCase : int = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) __UpperCamelCase : Dict = """AAAB""" __UpperCamelCase : List[Any] = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) __UpperCamelCase : int = """abcdabcy""" __UpperCamelCase : Optional[Any] = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) __UpperCamelCase : List[str] = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
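The failure array stores, for each prefix of the pattern, the length of its longest proper prefix that is also a suffix; that is what lets the search resume after a mismatch without rescanning the text. A compact recomputation of the Test 5 value:

# failure[i] = longest proper prefix of pattern[:i+1] that is also its suffix.
pattern = "aabaabaaa"
failure, k = [0], 0
for ch in pattern[1:]:
    while k > 0 and pattern[k] != ch:
        k = failure[k - 1]
    if pattern[k] == ch:
        k += 1
    failure.append(k)
print(failure)  # [0, 1, 0, 1, 2, 3, 4, 5, 2], matching the assertion above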
307
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["image_processor", "tokenizer"] _UpperCAmelCase = "LayoutLMv2ImageProcessor" _UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase , ) snake_case__ = kwargs.pop('feature_extractor' ) snake_case__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case__ = features['words'] snake_case__ = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) # add pixel values snake_case__ = features.pop('pixel_values' ) if return_overflowing_tokens is True: snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] ) snake_case__ = images return encoded_inputs def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' ) return images_with_overflow def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]: return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCAmelCase_ ( self: str ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCAmelCase_ ( self: Any ) -> List[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , ) return self.image_processor
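A minimal usage sketch for the processor above, with assumptions flagged: the class is exported as `LayoutXLMProcessor`, the checkpoint name and input file are illustrative, and with the default `apply_ocr=True` the image processor runs OCR, which requires pytesseract:

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # hypothetical input file
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image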
307
1
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def a_ ( _A , _A , _A , _A , _A ) -> List[Any]: """simple docstring""" # Load configuration defined in the metadata file with open(_A ) as metadata_file: snake_case__ = json.load(_A ) snake_case__ = LukeConfig(use_entity_aware_attention=_A , **metadata['model_config'] ) # Load in the weights from the checkpoint_path snake_case__ = torch.load(_A , map_location='cpu' )['module'] # Load the entity vocab file snake_case__ = load_original_entity_vocab(_A ) # add an entry for [MASK2] snake_case__ = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 snake_case__ = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks snake_case__ = AddedToken('<ent>' , lstrip=_A , rstrip=_A ) snake_case__ = AddedToken('<ent2>' , lstrip=_A , rstrip=_A ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'r' ) as f: snake_case__ = json.load(_A ) snake_case__ = 'MLukeTokenizer' with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' ) as f: json.dump(_A , _A ) with open(os.path.join(_A , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(_A , _A ) snake_case__ = MLukeTokenizer.from_pretrained(_A ) # Initialize the embeddings of the special tokens snake_case__ = tokenizer.convert_tokens_to_ids(['@'] )[0] snake_case__ = tokenizer.convert_tokens_to_ids(['#'] )[0] snake_case__ = state_dict['embeddings.word_embeddings.weight'] snake_case__ = word_emb[ent_init_index].unsqueeze(0 ) snake_case__ = word_emb[enta_init_index].unsqueeze(0 ) snake_case__ = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: snake_case__ = state_dict[bias_name] snake_case__ = decoder_bias[ent_init_index].unsqueeze(0 ) snake_case__ = decoder_bias[enta_init_index].unsqueeze(0 ) snake_case__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: snake_case__ = f'''encoder.layer.{layer_index}.attention.self.''' snake_case__ = state_dict[prefix + matrix_name] snake_case__ = state_dict[prefix + matrix_name] snake_case__ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks snake_case__ = state_dict['entity_embeddings.entity_embeddings.weight'] snake_case__ = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 ) snake_case__ = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' snake_case__ = state_dict['entity_predictions.bias'] snake_case__ = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 ) snake_case__ = torch.cat([entity_prediction_bias, entity_mask_bias] ) snake_case__ = LukeForMaskedLM(config=_A ).eval() state_dict.pop('entity_predictions.decoder.weight' ) state_dict.pop('lm_head.decoder.weight' ) state_dict.pop('lm_head.decoder.bias' ) 
snake_case__ = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )): snake_case__ = state_dict[key] else: snake_case__ = state_dict[key] snake_case__ , snake_case__ = model.load_state_dict(_A , strict=_A ) if set(_A ) != {"luke.embeddings.position_ids"}: raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(_A ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs snake_case__ = MLukeTokenizer.from_pretrained(_A , task='entity_classification' ) snake_case__ = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).' snake_case__ = (0, 9) snake_case__ = tokenizer(_A , entity_spans=[span] , return_tensors='pt' ) snake_case__ = model(**_A ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base snake_case__ = torch.Size((1, 33, 768) ) snake_case__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base snake_case__ = torch.Size((1, 1, 768) ) snake_case__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' f''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _A , atol=1e-4 ): raise ValueError # Verify masked word/entity prediction snake_case__ = MLukeTokenizer.from_pretrained(_A ) snake_case__ = 'Tokyo is the capital of <mask>.' 
snake_case__ = (24, 30) snake_case__ = tokenizer(_A , entity_spans=[span] , return_tensors='pt' ) snake_case__ = model(**_A ) snake_case__ = encoding['input_ids'][0].tolist() snake_case__ = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) ) snake_case__ = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_A ) snake_case__ = outputs.entity_logits[0][0].argmax().item() snake_case__ = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(_A ) ) model.save_pretrained(_A ) def a_ ( _A ) -> List[Any]: """simple docstring""" snake_case__ = ['[MASK]', '[PAD]', '[UNK]'] snake_case__ = [json.loads(_A ) for line in open(_A )] snake_case__ = {} for entry in data: snake_case__ = entry['id'] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: snake_case__ = entity_id break snake_case__ = f'''{language}:{entity_name}''' snake_case__ = entity_id return new_mapping if __name__ == "__main__": __UpperCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""") parser.add_argument( """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration.""" ) parser.add_argument( """--entity_vocab_path""", default=None, type=str, help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model.""" ) parser.add_argument( """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted.""" ) __UpperCamelCase : Dict = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
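The conversion above grows each embedding table by initialising the new special-token rows from existing entries and concatenating. That trick in isolation:

import torch

word_emb = torch.randn(8, 4)         # toy table: vocab 8, dim 4
ent_row = word_emb[3].unsqueeze(0)   # seed <ent> from an existing token
enta_row = word_emb[5].unsqueeze(0)  # seed <ent2> from another token
word_emb = torch.cat([word_emb, ent_row, enta_row])
print(word_emb.shape)                # torch.Size([10, 4])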
307
def a_ ( _A = 1000 ) -> int: """simple docstring""" return sum(e for e in range(3 , _A ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(f'''{solution() = }''')
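The brute-force sum above also has a closed form by inclusion-exclusion: add the multiples of 3 and of 5, then subtract the multiples of 15, which were counted twice; each term is k times a triangular number:

def solution_closed_form(n: int = 1000) -> int:
    # k * (1 + 2 + ... + m), where m = (n - 1) // k counts the
    # multiples of k below n.
    def tri_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return tri_sum(3) + tri_sum(5) - tri_sum(15)

print(solution_closed_form())  # 233168, matching the loop above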
307
1
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __UpperCamelCase : Optional[Any] = TypeVar("""T""") class __SCREAMING_SNAKE_CASE( Generic[T] ): def __init__( self: List[str] , UpperCamelCase: list[T] , UpperCamelCase: Callable[[T, T], T] ) -> None: snake_case__ = None snake_case__ = len(UpperCamelCase ) snake_case__ = [any_type for _ in range(self.N )] + arr snake_case__ = fnc self.build() def lowerCAmelCase_ ( self: int ) -> None: for p in range(self.N - 1 , 0 , -1 ): snake_case__ = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: int , UpperCamelCase: T ) -> None: p += self.N snake_case__ = v while p > 1: snake_case__ = p // 2 snake_case__ = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: int , UpperCamelCase: int ) -> T | None: # noqa: E741 snake_case__ , snake_case__ = l + self.N, r + self.N snake_case__ = None while l <= r: if l % 2 == 1: snake_case__ = self.st[l] if res is None else self.fn(UpperCamelCase , self.st[l] ) if r % 2 == 0: snake_case__ = self.st[r] if res is None else self.fn(UpperCamelCase , self.st[r] ) snake_case__ , snake_case__ = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __UpperCamelCase : Any = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __UpperCamelCase : Optional[Any] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __UpperCamelCase : Dict = SegmentTree(test_array, min) __UpperCamelCase : Optional[Any] = SegmentTree(test_array, max) __UpperCamelCase : int = SegmentTree(test_array, lambda a, b: a + b) def a_ ( ) -> None: """simple docstring""" for i in range(len(_A ) ): for j in range(_A , len(_A ) ): snake_case__ = reduce(_A , test_array[i : j + 1] ) snake_case__ = reduce(_A , test_array[i : j + 1] ) snake_case__ = reduce(lambda _A , _A : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(_A , _A ) assert max_range == max_segment_tree.query(_A , _A ) assert sum_range == sum_segment_tree.query(_A , _A ) test_all_segments() for index, value in test_updates.items(): __UpperCamelCase : List[str] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
307
import os def a_ ( ) -> Optional[Any]: """simple docstring""" snake_case__ = os.path.join(os.path.dirname(_A ) , 'num.txt' ) with open(_A ) as file_hand: return str(sum(int(_A ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
307
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
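# Using the `hide` context manager above: the cursor stays hidden for the
# duration of the block and is restored even if an exception is raised.
import time

with hide():
    for _ in range(3):
        print(".", end="", flush=True)
        time.sleep(0.1)
print()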
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
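# Sanity checks for the helpers above (names as restored here):
assert mod_inverse(3, 11) == 4               # 3 * 4 == 12 ≡ 1 (mod 11)
assert (7 * mod_inverse(7, 26)) % 26 == 1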
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Any = { """7B""": 11008, """13B""": 13824, """30B""": 17920, """65B""": 22016, """70B""": 28672, } __UpperCamelCase : Optional[Any] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def a_ ( _A , _A=1 , _A=256 ) -> str: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def a_ ( _A ) -> int: """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def a_ ( _A , _A ) -> int: """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def a_ ( _A , _A , _A , _A=True ) -> List[str]: """simple docstring""" os.makedirs(_A , exist_ok=_A ) snake_case__ = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) snake_case__ = read_json(os.path.join(_A , 'params.json' ) ) snake_case__ = NUM_SHARDS[model_size] snake_case__ = params['n_layers'] snake_case__ = params['n_heads'] snake_case__ = n_heads // num_shards snake_case__ = params['dim'] snake_case__ = dim // n_heads snake_case__ = 10000.0 snake_case__ = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: snake_case__ = params['n_kv_heads'] # for GQA / MQA snake_case__ = n_heads_per_shard // num_key_value_heads snake_case__ = dim // num_key_value_heads else: # compatibility with other checkpoints snake_case__ = n_heads snake_case__ = n_heads_per_shard snake_case__ = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
snake_case__ = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded snake_case__ = [ torch.load(os.path.join(_A , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' ) for i in range(_A ) ] snake_case__ = 0 snake_case__ = {'weight_map': {}} for layer_i in range(_A ): snake_case__ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. snake_case__ = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) snake_case__ = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = inv_freq for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) snake_case__ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: snake_case__ = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( 
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs snake_case__ = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) snake_case__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 snake_case__ = params['multiple_of'] if 'multiple_of' in params else 256 snake_case__ = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) snake_case__ = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def a_ ( _A , _A ) -> Tuple: """simple docstring""" # Initialize the tokenizer based on the `spm` model snake_case__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) snake_case__ = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def a_ ( ) -> str: """simple docstring""" snake_case__ = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) snake_case__ = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) snake_case__ = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
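# Driving the conversion functions above directly from Python instead of the CLI;
# the paths are placeholders for wherever the original LLaMA weights live.
write_model(
    model_path="./llama-7b-hf",        # corresponds to --output_dir
    input_base_path="./LLaMA/7B",      # --input_dir joined with --model_size
    model_size="7B",
    safe_serialization=True,
)
write_tokenizer("./llama-7b-hf", "./LLaMA/tokenizer.model")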
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class TPULauncherTest(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
import os import string import sys __UpperCamelCase : List[Any] = 1 << 8 __UpperCamelCase : Union[str, Any] = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } __UpperCamelCase : Optional[Any] = KEYMAP["""up"""] __UpperCamelCase : Tuple = KEYMAP["""left"""] if sys.platform == "win32": __UpperCamelCase : List[Any] = [] __UpperCamelCase : int = { b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): __UpperCamelCase : List[str] = ord(str(i)) def a_ ( ) -> Optional[int]: """simple docstring""" if os.name == "nt": import msvcrt snake_case__ = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_A ) == 0: # Read the keystroke snake_case__ = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): snake_case__ = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: snake_case__ = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(_A ) if ord(_A ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) snake_case__ = chr(KEYMAP['esc'] ) except KeyError: snake_case__ = cha[1] else: snake_case__ = ch.decode(_A ) else: snake_case__ = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty snake_case__ = sys.stdin.fileno() snake_case__ = termios.tcgetattr(_A ) try: tty.setraw(_A ) snake_case__ = sys.stdin.read(1 ) finally: termios.tcsetattr(_A , termios.TCSADRAIN , _A ) return ch def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = get_raw_chars() if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_A ) == KEYMAP["esc"]: snake_case__ = get_raw_chars() if ord(_A ) == KEYMAP["mod_int"]: snake_case__ = get_raw_chars() if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_A ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
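# Why ARROW_KEY_FLAG exists: OR-ing in 1 << 8 lifts arrow-key codes above every
# single-byte value, so KEYMAP["up"] (65 + flag) cannot collide with ord("A") == 65.
ARROW_KEY_FLAG = 1 << 8
up = 65 + ARROW_KEY_FLAG
assert up == 321 and up != ord("A")
assert chr(up - ARROW_KEY_FLAG) == "A"  # stripping the flag recovers the base key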
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
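# Consuming the uploaded artifact like any other checkpoint; the hub id follows
# the upload comment at the end of the script above.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
out = model(**tok(["hello"], return_tensors="pt"))  # tiny logits, smoke tests only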
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
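# A minimal instantiation sketch for the config above; note how attribute_map
# exposes d_model / num_layers under the standard transformer attribute names.
config = GPTSanJapaneseConfig(d_model=128, num_switch_layers=2, num_heads=4)
print(config.hidden_size)        # 128, aliased to d_model
print(config.num_hidden_layers)  # 2, i.e. num_switch_layers + num_ext_layers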
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
# note: the float comparison works here because a is exactly divisible by b
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
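# The script leans on Fermat's little theorem: for prime p and gcd(b, p) == 1,
# b**(p - 2) is the modular inverse of b. Python's 3-argument pow agrees:
p, b = 701, 10
inv = pow(b, p - 2, p)           # same value binary_exponentiation(b, p - 2, p) returns
assert (b * inv) % p == 1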
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
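# Quick numeric check of the helpers above: at half the speed of light the
# Lorentz factor is 1 / sqrt(1 - 0.25) ~= 1.1547.
v = 0.5 * 299792458
print(beta(v))   # 0.5
print(gamma(v))  # ~1.1547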
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of a normal distribution with mean ``mu`` and std ``sigma``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
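# At the mean of a standard normal the density is 1 / sqrt(2 * pi):
print(gaussian(0))  # ~0.3989422804014327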
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
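# ByT5 tokenizes raw UTF-8 bytes shifted past the 3 special tokens (pad/eos/unk),
# so it can be instantiated without any vocab file. The exact ids below are an
# assumption based on that 3-token offset:
from transformers import ByT5Tokenizer

tok = ByT5Tokenizer()
print(tok("hi").input_ids)  # expected [107, 108, 1]: ord("h") + 3, ord("i") + 3, eos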
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
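# This formatter is what backs the torch output mode of `datasets`; integers come
# back as torch.int64 and floats as torch.float32 per the dtype mapping above.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]}).with_format("torch")
print(ds[0]["x"].dtype, ds[0]["y"].dtype)  # torch.int64 torch.float32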
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : int = {"""vocab_file""": """spiece.model"""} __UpperCamelCase : Any = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", } } # TODO(PVP) - this should be removed in Transformers v5 __UpperCamelCase : Tuple = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } __UpperCamelCase : Optional[Any] = """▁""" class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) snake_case__ = legacy snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , ) snake_case__ = vocab_file snake_case__ = extra_ids snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) @staticmethod def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any: if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , ) return max_model_length @property def lowerCAmelCase_ ( self: Tuple ) -> List[str]: return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase )) + [1] return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return list( set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()] def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[int] ) -> List[int]: if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) if token_ids_a is None: return token_ids_a else: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self: Union[str, Any] ) -> List[str]: snake_case__ = self.__dict__.copy() snake_case__ = None return state def __setstate__( self: Optional[int] , UpperCamelCase: int ) -> List[str]: snake_case__ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ = {} snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self: str , UpperCamelCase: "TextInput" , **UpperCamelCase: Dict ) -> List[str]: # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: snake_case__ = SPIECE_UNDERLINE + text.replace(UpperCamelCase , ' ' ) return super().tokenize(UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , **UpperCamelCase: str ) -> str: if not self.legacy: snake_case__ = text.startswith(UpperCamelCase ) if is_first: snake_case__ = text[1:] snake_case__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(UpperCamelCase ): snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] ) -> Dict: if token.startswith('<extra_id_' ): snake_case__ = re.match(R'<extra_id_(\d+)>' , UpperCamelCase ) snake_case__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCamelCase ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> Tuple: if index < self.sp_model.get_piece_size(): snake_case__ = self.sp_model.IdToPiece(UpperCamelCase ) else: snake_case__ = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> Dict: snake_case__ = [] snake_case__ = '' snake_case__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase ) + token snake_case__ = True snake_case__ = [] else: current_sub_tokens.append(UpperCamelCase ) snake_case__ = False out_string += self.sp_model.decode(UpperCamelCase ) return out_string.strip() def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ = os.path.join( UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) 
elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , 'wb' ) as fi: snake_case__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,)
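# A short usage sketch for this tokenizer (the class shown is the slow,
# SentencePiece-backed T5Tokenizer); the extra-id sentinels discussed above
# surface as <extra_id_0>, <extra_id_1>, ... and map to the top of the vocabulary.
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
ids = tok("The <extra_id_0> walks in <extra_id_1> park").input_ids
print(tok.convert_ids_to_tokens(ids))  # sentinels stay intact, </s> is appended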
def add(first: int, second: int) -> int:
    while second != 0:
        c = first & second   # carry bits
        first ^= second      # sum without the carry
        second = c << 1      # carry shifted into position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
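# Tracing the loop for 5 + 3 makes the carry propagation concrete:
# iteration 1: c = 5 & 3 = 1;  first = 5 ^ 3 = 6;  second = 1 << 1 = 2
# iteration 2: c = 6 & 2 = 2;  first = 6 ^ 2 = 4;  second = 2 << 1 = 4
# iteration 3: c = 4 & 4 = 4;  first = 4 ^ 4 = 0;  second = 4 << 1 = 8
# iteration 4: c = 0 & 8 = 0;  first = 0 ^ 8 = 8;  second = 0 -> loop ends
assert add(5, 3) == 8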
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __SCREAMING_SNAKE_CASE: def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: List[str] ) -> Dict: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None if self.use_token_type_ids: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: 
List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict: snake_case__ = LlamaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) snake_case__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str: snake_case__ = True snake_case__ = LlamaModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , ) snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any: snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]: snake_case__ = True snake_case__ = True snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # first forward pass snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , ) snake_case__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] # select random slice snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ = output_from_past[:, :, 
random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) ) def lowerCAmelCase_ ( self: int ) -> Dict: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ): _UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else () _UpperCAmelCase = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = LlamaModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'single_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'multi_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def lowerCAmelCase_ ( self: Dict ) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = ids_tensor([1, 10] , config.vocab_size ) snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = LlamaModel(UpperCamelCase ) original_model.to(UpperCamelCase ) original_model.eval() snake_case__ = original_model(UpperCamelCase ).last_hidden_state snake_case__ = original_model(UpperCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = {'type': scaling_type, 'factor': 10.0} snake_case__ = LlamaModel(UpperCamelCase ) scaled_model.to(UpperCamelCase ) scaled_model.eval() snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: int ) -> List[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) snake_case__ = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # fmt: off snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]: snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' snake_case__ = 'Simply put, the theory of relativity states that ' snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' ) snake_case__ = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase ) # greedy generation outputs snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase ) snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase )
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal gradient = y / 4x, from the gradient of the ellipse y^2 + 4x^2 = 100
    normal_gradient = point_y / 4 / point_x
    # double-angle identities give the direction of the reflected beam
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
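# This is Project Euler problem 144: count reflections inside the elliptical
# "white cell" y^2 + 4x^2 = 100 until the beam escapes through the small gap
# at the top (|x| <= 0.01, y > 0). One-bounce sanity check, using the names
# as restored above: the reflected point must still lie on the ellipse.
from math import isclose

bx, by, bm = next_point(1.4, -9.6, (10.1 + 9.6) / (0.0 - 1.4))
assert isclose(by * by + 4 * bx * bx, 100.0)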
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __SCREAMING_SNAKE_CASE( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self: Any , UpperCamelCase: Optional[int]=None , **UpperCamelCase: Union[str, Any] ) -> int: super().__init__(features=UpperCamelCase ) snake_case__ = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCAmelCase_ ( self: Any , UpperCamelCase: Any ) -> List[str]: import torch if isinstance(UpperCamelCase , UpperCamelCase ) and column: if all( isinstance(UpperCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(UpperCamelCase ) return column def lowerCAmelCase_ ( self: str , UpperCamelCase: Dict ) -> Union[str, Any]: import torch if isinstance(UpperCamelCase , (str, bytes, type(UpperCamelCase )) ): return value elif isinstance(UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() snake_case__ = {} if isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): snake_case__ = {'dtype': torch.intaa} elif isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): snake_case__ = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCamelCase , PIL.Image.Image ): snake_case__ = np.asarray(UpperCamelCase ) return torch.tensor(UpperCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: str ) -> Any: import torch # support for torch, tf, jax etc. 
if hasattr(UpperCamelCase , '__array__' ) and not isinstance(UpperCamelCase , torch.Tensor ): snake_case__ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCamelCase , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] ) elif isinstance(UpperCamelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCamelCase ) for substruct in data_struct] ) return self._tensorize(UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: dict ) -> List[str]: return map_nested(self._recursive_tensorize , UpperCamelCase , map_list=UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: pa.Table ) -> Mapping: snake_case__ = self.numpy_arrow_extractor().extract_row(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_row(UpperCamelCase ) return self.recursive_tensorize(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: pa.Table ) -> "torch.Tensor": snake_case__ = self.numpy_arrow_extractor().extract_column(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_column(UpperCamelCase , pa_table.column_names[0] ) snake_case__ = self.recursive_tensorize(UpperCamelCase ) snake_case__ = self._consolidate(UpperCamelCase ) return column def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: pa.Table ) -> Mapping: snake_case__ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_batch(UpperCamelCase ) snake_case__ = self.recursive_tensorize(UpperCamelCase ) for column_name in batch: snake_case__ = self._consolidate(batch[column_name] ) return batch
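# Illustration of the consolidation rule used by the formatter above (a
# standalone sketch, not part of the original module): a list of tensors is
# stacked into one tensor only when every element shares the same shape and
# dtype; ragged lists are returned unchanged.
import torch as _torch

_same = [_torch.zeros(2), _torch.ones(2)]
_mixed = [_torch.zeros(2), _torch.ones(3)]
assert _torch.stack(_same).shape == (2, 2)  # equal shape/dtype -> one stacked tensor
assert isinstance(_mixed, list)  # ragged shapes stay a plain Python list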
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : Dict = { """deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "perceiver" def __init__( self: Optional[Any] , UpperCamelCase: Union[str, Any]=2_56 , UpperCamelCase: Dict=12_80 , UpperCamelCase: Any=7_68 , UpperCamelCase: List[str]=1 , UpperCamelCase: Optional[int]=26 , UpperCamelCase: Union[str, Any]=8 , UpperCamelCase: Tuple=8 , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: int=None , UpperCamelCase: List[Any]="kv" , UpperCamelCase: Any=1 , UpperCamelCase: List[Any]=1 , UpperCamelCase: Tuple="gelu" , UpperCamelCase: str=0.1 , UpperCamelCase: List[str]=0.02 , UpperCamelCase: Tuple=1e-12 , UpperCamelCase: str=True , UpperCamelCase: Optional[Any]=2_62 , UpperCamelCase: List[Any]=20_48 , UpperCamelCase: Any=56 , UpperCamelCase: Any=[3_68, 4_96] , UpperCamelCase: Any=16 , UpperCamelCase: Dict=19_20 , UpperCamelCase: int=16 , UpperCamelCase: Optional[Any]=[1, 16, 2_24, 2_24] , **UpperCamelCase: int , ) -> List[Any]: super().__init__(**UpperCamelCase ) snake_case__ = num_latents snake_case__ = d_latents snake_case__ = d_model snake_case__ = num_blocks snake_case__ = num_self_attends_per_block snake_case__ = num_self_attention_heads snake_case__ = num_cross_attention_heads snake_case__ = qk_channels snake_case__ = v_channels snake_case__ = cross_attention_shape_for_attention snake_case__ = self_attention_widening_factor snake_case__ = cross_attention_widening_factor snake_case__ = hidden_act snake_case__ = attention_probs_dropout_prob snake_case__ = initializer_range snake_case__ = layer_norm_eps snake_case__ = use_query_residual # masked language modeling attributes snake_case__ = vocab_size snake_case__ = max_position_embeddings # image classification attributes snake_case__ = image_size # flow attributes snake_case__ = train_size # multimodal autoencoding attributes snake_case__ = num_frames snake_case__ = audio_samples_per_frame snake_case__ = samples_per_patch snake_case__ = output_shape class __SCREAMING_SNAKE_CASE( a_ ): @property def lowerCAmelCase_ ( self: int ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('inputs', dynamic_axis), ('attention_mask', dynamic_axis), ] ) @property def lowerCAmelCase_ ( self: Optional[Any] ) -> float: return 1e-4 def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase: int = -1 , UpperCamelCase: int = -1 , UpperCamelCase: int = -1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[TensorType] = None , UpperCamelCase: int = 3 , UpperCamelCase: int = 40 , UpperCamelCase: int = 40 , ) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(UpperCamelCase , UpperCamelCase ): # If dynamic axis (-1) we 
forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX snake_case__ = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX snake_case__ = preprocessor.num_special_tokens_to_add(UpperCamelCase ) snake_case__ = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence snake_case__ = [' '.join(['a'] ) * seq_length] * batch_size snake_case__ = dict(preprocessor(UpperCamelCase , return_tensors=UpperCamelCase ) ) snake_case__ = inputs.pop('input_ids' ) return inputs elif isinstance(UpperCamelCase , UpperCamelCase ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX snake_case__ = compute_effective_axis_dimension(UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch ) snake_case__ = self._generate_dummy_images(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = dict(preprocessor(images=UpperCamelCase , return_tensors=UpperCamelCase ) ) snake_case__ = inputs.pop('pixel_values' ) return inputs else: raise ValueError( 'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
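# Usage sketch (an illustration; assumes the `transformers` package, where the
# configuration above is published as `PerceiverConfig`):
from transformers import PerceiverConfig

perceiver_config = PerceiverConfig(num_latents=128, d_latents=640)
assert perceiver_config.num_latents == 128 and perceiver_config.d_latents == 640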
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
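# Cross-check of the deque-rotation approach (a sketch): circular convolution can
# also be computed in the frequency domain, since DFT(x circ-conv h) = DFT(x) * DFT(h).
# For the default signals above, the rotation method should likewise produce
# [10.0, 10.0, 6.0, 14.0].
if __name__ == "__main__":
    x, h = np.array([2, 1, 2, -1]), np.array([1, 2, 3, 4])
    via_fft = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
    assert np.allclose(via_fft, [10.0, 10.0, 6.0, 14.0])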
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    """simple docstring"""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
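# Usage sketch (the context-manager name `hidden_cursor` is the reconstruction
# chosen above): hide the cursor while rendering a spinner, and guarantee it is
# restored on exit even if the body raises.
if __name__ == "__main__":
    import time

    with hidden_cursor():
        for _ in range(3):
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(0.1)
    sys.stdout.write("\n")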
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a_ ( _A , _A=0.999 , _A="cosine" , ) -> Optional[int]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_A ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' ) snake_case__ = [] for i in range(_A ): snake_case__ = i / num_diffusion_timesteps snake_case__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) ) return torch.tensor(_A , dtype=torch.floataa ) class __SCREAMING_SNAKE_CASE( a_ , a_ ): _UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase = 2 @register_to_config def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str: if trained_betas is not None: snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. snake_case__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' ) elif beta_schedule == "exp": snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' ) else: raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' ) snake_case__ = 1.0 - self.betas snake_case__ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = use_karras_sigmas def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str: if schedule_timesteps is None: snake_case__ = self.timesteps snake_case__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0 else: snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep snake_case__ = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: snake_case__ = self.index_for_timestep(UpperCamelCase ) snake_case__ = self.sigmas[step_index] snake_case__ = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str: snake_case__ = num_inference_steps snake_case__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": snake_case__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": snake_case__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) snake_case__ = np.log(UpperCamelCase ) snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase ) if self.config.use_karras_sigmas: snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps ) snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] ) snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase ) snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) snake_case__ = torch.from_numpy(UpperCamelCase ) snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(UpperCamelCase ).startswith('mps' ): # mps does not support float64 snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa ) else: snake_case__ = timesteps.to(device=UpperCamelCase ) # empty dt and derivative snake_case__ = None snake_case__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter snake_case__ = defaultdict(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple: # get log sigma snake_case__ = np.log(UpperCamelCase ) # get distribution snake_case__ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) snake_case__ = low_idx + 1 snake_case__ = log_sigmas[low_idx] snake_case__ = log_sigmas[high_idx] # interpolate sigmas snake_case__ = (low - log_sigma) / (low - high) snake_case__ = np.clip(UpperCamelCase , 0 , 1 ) # transform interpolation to time range snake_case__ = (1 - w) * low_idx + w * high_idx snake_case__ = t.reshape(sigma.shape ) return t def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor: snake_case__ = in_sigmas[-1].item() snake_case__ = in_sigmas[0].item() snake_case__ = 7.0 # 7.0 is the value used in the paper snake_case__ = np.linspace(0 , 1 , UpperCamelCase ) snake_case__ = sigma_min ** (1 / rho) snake_case__ = sigma_max ** (1 / rho) snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: return self.dt is None def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]: snake_case__ = self.index_for_timestep(UpperCamelCase ) # advance index counter by 1 snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: snake_case__ = self.sigmas[step_index] snake_case__ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method snake_case__ = self.sigmas[step_index - 1] snake_case__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API snake_case__ = 0 snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": snake_case__ = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: snake_case__ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order snake_case__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep snake_case__ = sigma_next - sigma_hat # store for 2nd order step snake_case__ = derivative snake_case__ = dt snake_case__ = sample else: # 2. 2nd order / Heun's method snake_case__ = (sample - pred_original_sample) / sigma_next snake_case__ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample snake_case__ = self.dt snake_case__ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" snake_case__ = None snake_case__ = None snake_case__ = None snake_case__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ): # mps does not support float64 snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: snake_case__ = self.timesteps.to(original_samples.device ) snake_case__ = timesteps.to(original_samples.device ) snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps] snake_case__ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): snake_case__ = sigma.unsqueeze(-1 ) snake_case__ = original_samples + noise * sigma return noisy_samples def __len__( self: List[Any] ) -> Union[str, Any]: return self.config.num_train_timesteps
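# Standalone sketch of the Karras et al. (2022) sigma schedule implemented by
# `_convert_to_karras` above: interpolate between sigma_max and sigma_min in
# rho-space (rho = 7.0, the value used in the paper) rather than linearly.
import numpy as _np


def _karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0):
    ramp = _np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


_s = _karras_sigmas(0.1, 10.0, 5)
# monotonically decreasing from sigma_max down to sigma_min
assert abs(_s[0] - 10.0) < 1e-9 and abs(_s[-1] - 0.1) < 1e-9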
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = (DPMSolverSDEScheduler,) _UpperCAmelCase = 1_0 def lowerCAmelCase_ ( self: Optional[int] , **UpperCamelCase: Dict ) -> str: snake_case__ = { 'num_train_timesteps': 11_00, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'noise_sampler_seed': 0, } config.update(**UpperCamelCase ) return config def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCamelCase , beta_end=UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ = sample.to(UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) snake_case__ = model(UpperCamelCase , UpperCamelCase ) snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ = torch.mean(torch.abs(UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1e-2 assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1e-2 assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3 def lowerCAmelCase_ ( self: Dict ) -> Tuple: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config(prediction_type='v_prediction' ) snake_case__ = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ = sample.to(UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) snake_case__ = model(UpperCamelCase , UpperCamelCase ) snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ = torch.mean(torch.abs(UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1e-2 assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 
128.1_663_360_595_703 ) < 1e-2 assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1e-2 assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1e-3 def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter.to(UpperCamelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: snake_case__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) snake_case__ = model(UpperCamelCase , UpperCamelCase ) snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ = torch.mean(torch.abs(UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1e-2 assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1e-2 assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3 def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase , use_karras_sigmas=UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter.to(UpperCamelCase ) * scheduler.init_noise_sigma snake_case__ = sample.to(UpperCamelCase ) for t in scheduler.timesteps: snake_case__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) snake_case__ = model(UpperCamelCase , UpperCamelCase ) snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ = torch.mean(torch.abs(UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1e-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1e-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1e-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
from typing import TYPE_CHECKING from ..utils import _LazyModule __UpperCamelCase : Tuple = { """config""": [ """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""", """OnnxConfig""", """OnnxConfigWithPast""", """OnnxSeq2SeqConfigWithPast""", """PatchingSpec""", ], """convert""": ["""export""", """validate_model_outputs"""], """features""": ["""FeaturesManager"""], """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
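# A pared-down, hypothetical illustration (not the real `_LazyModule` class) of
# what the lazy-module pattern above buys: submodules are imported only when an
# exported name is first accessed, so the package import itself stays cheap.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # import the owning submodule on first attribute access
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)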
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """simple docstring"""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
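# Worked example (using the reconstructed names above): because neighbours are
# pushed in reversed order, the traversal visits A, B, D, E, F, C, G for the
# graph `G`, and the returned set covers every vertex reachable from the start.
if __name__ == "__main__":
    assert depth_first_search(G, "A") == set(G)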
def xnor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """simple docstring"""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
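# XNOR is the negation of XOR, so the gate above agrees with `int(not (a ^ b))`
# over the whole truth table:
if __name__ == "__main__":
    assert all(xnor_gate(a, b) == int(not (a ^ b)) for a in (0, 1) for b in (0, 1))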
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCamelCase : int = { """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[str] = [ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __UpperCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""") __UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def a_ ( ) -> int: """simple docstring""" with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def a_ ( ) -> List[str]: """simple docstring""" snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a_ ( ) -> Dict: """simple docstring""" snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() snake_case__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def a_ ( ) -> Optional[int]: """simple docstring""" assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() def a_ ( ) -> Optional[Any]: """simple docstring""" # laplace diagonals snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) snake_case__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def a_ ( ) -> Dict: """simple docstring""" assert med.median_filter(_A , 3 ).any() def a_ ( ) -> Dict: """simple docstring""" snake_case__ , snake_case__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = sp.make_sepia(_A , 20 ) assert sepia.all() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]: """simple docstring""" snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]: """simple docstring""" snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def a_ ( ) -> Any: """simple docstring""" snake_case__ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
snake_case__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None snake_case__ = 0 snake_case__ = 0 snake_case__ = image[x_coordinate][y_coordinate] snake_case__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image snake_case__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): snake_case__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
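# Sanity check (a sketch): for denominators d <= 8 there are exactly 21 reduced
# proper fractions n/d, i.e. sum(phi(d) for d in 2..8) == 21. The brute-force
# gcd count and the totient sieve above agree on this small case.
if __name__ == "__main__":
    from math import gcd

    assert sum(1 for d in range(2, 9) for n in range(1, d) if gcd(n, d) == 1) == 21
    assert solution(8) == 21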
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
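# The processor above follows the common "resize shortest edge to 256, center
# crop to 224, rescale by 1/255, normalize" recipe. A standalone numpy sketch of
# the last two steps (IMAGENET_STANDARD_MEAN/STD imported above are the 0.5
# constants, so normalization maps [0, 1] to [-1, 1]):
import numpy as _np

_pixels = _np.random.randint(0, 256, size=(3, 224, 224)).astype(_np.float32)
_normalized = (_pixels * (1 / 255) - 0.5) / 0.5
assert _normalized.min() >= -1.0 and _normalized.max() <= 1.0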
from __future__ import annotations

sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """simple docstring"""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """simple docstring"""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """simple docstring"""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """simple docstring"""
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
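# Worked example: 197 is a circular prime because all of its rotations
# (197, 971, 719) are prime, and below one hundred there are exactly thirteen
# circular primes.
if __name__ == "__main__":
    assert all(is_prime(n) for n in (197, 971, 719))
    assert find_circular_primes(99) == [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]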
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
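# Note: the routine above applies len(data) random transpositions, which is not
# the classic Fisher-Yates algorithm. A minimal sketch of the canonical,
# uniformly distributed version (the same idea behind `random.shuffle`):
def fisher_yates_shuffle_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick only from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data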
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } __UpperCamelCase : List[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def a_ ( _A , _A , _A , _A , _A ) -> Optional[int]: """simple docstring""" for attribute in key.split('.' ): snake_case__ = getattr(_A , _A ) if weight_type is not None: snake_case__ = getattr(_A , _A ).shape else: snake_case__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ = value elif weight_type == "weight_g": snake_case__ = value elif weight_type == "weight_v": snake_case__ = value elif weight_type == "bias": snake_case__ = value elif weight_type == "running_mean": snake_case__ = value elif weight_type == "running_var": snake_case__ = value elif weight_type == "num_batches_tracked": snake_case__ = value elif weight_type == "inv_freq": snake_case__ = value else: snake_case__ = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def a_ ( _A , _A , _A ) -> Optional[int]: """simple docstring""" snake_case__ = [] snake_case__ = fairseq_model.state_dict() snake_case__ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): snake_case__ = False if "conv_layers" in name: load_conv_layer( _A , _A , _A , _A , hf_model.config.feat_extract_norm == 'group' , ) snake_case__ = True else: for key, mapped_key in MAPPING.items(): snake_case__ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case__ = True if "*" in mapped_key: snake_case__ = name.split(_A )[0].split('.' )[-2] snake_case__ = mapped_key.replace('*' , _A ) if "pos_bias_u" in name: snake_case__ = None elif "pos_bias_v" in name: snake_case__ = None elif "weight_g" in name: snake_case__ = 'weight_g' elif "weight_v" in name: snake_case__ = 'weight_v' elif "bias" in name: snake_case__ = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case__ = 'weight' elif "running_mean" in name: snake_case__ = 'running_mean' elif "inv_freq" in name: snake_case__ = 'inv_freq' elif "running_var" in name: snake_case__ = 'running_var' elif "num_batches_tracked" in name: snake_case__ = 'num_batches_tracked' else: snake_case__ = None set_recursively(_A , _A , _A , _A , _A ) continue if not is_used: unused_weights.append(_A ) logger.warning(f'''Unused weights: {unused_weights}''' ) def a_ ( _A , _A , _A , _A , _A ) -> Union[str, Any]: """simple docstring""" snake_case__ = full_name.split('conv_layers.' )[-1] snake_case__ = name.split('.' ) snake_case__ = int(items[0] ) snake_case__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case__ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_A ) @torch.no_grad() def a_ ( _A , _A , _A=None , _A=None , _A=True ) -> Union[str, Any]: """simple docstring""" 
if config_path is not None: snake_case__ = WavaVecaConformerConfig.from_pretrained(_A , hidden_act='swish' ) else: snake_case__ = WavaVecaConformerConfig() if "rope" in checkpoint_path: snake_case__ = 'rotary' if is_finetuned: if dict_path: snake_case__ = Dictionary.load(_A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case__ = target_dict.pad_index snake_case__ = target_dict.bos_index snake_case__ = target_dict.eos_index snake_case__ = len(target_dict.symbols ) snake_case__ = os.path.join(_A , 'vocab.json' ) if not os.path.isdir(_A ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_A ) ) return os.makedirs(_A , exist_ok=_A ) snake_case__ = target_dict.indices # fairseq has the <pad> and <s> switched snake_case__ = 0 snake_case__ = 1 with open(_A , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(_A , _A ) snake_case__ = WavaVecaCTCTokenizer( _A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_A , ) snake_case__ = True if config.feat_extract_norm == 'layer' else False snake_case__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , ) snake_case__ = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A ) processor.save_pretrained(_A ) snake_case__ = WavaVecaConformerForCTC(_A ) else: snake_case__ = WavaVecaConformerForPreTraining(_A ) if is_finetuned: snake_case__ , snake_case__ , snake_case__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case__ = argparse.Namespace(task='audio_pretraining' ) snake_case__ = fairseq.tasks.setup_task(_A ) snake_case__ , snake_case__ , snake_case__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A ) snake_case__ = model[0].eval() recursively_load_weights(_A , _A , not is_finetuned ) hf_wavavec.save_pretrained(_A ) if __name__ == "__main__": __UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __UpperCamelCase : Tuple = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
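# Example invocation of the conversion script above (a sketch; the script name
# and checkpoint path are hypothetical placeholders, while the flags are the
# ones declared in the argument parser):
#
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path ./fairseq/checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --not_finetuned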
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        # Priorities outside 0..2 raise ValueError; full queues raise OverFlowError
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Pop from the highest-priority (lowest index) non-empty queue
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        # The smallest element has the highest priority
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
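# For comparison, the same smallest-element-first behaviour as ElementPriorityQueue
# is available from the standard library's heapq module; this is an independent
# sketch, not part of the classes above:
import heapq


def heapq_demo() -> None:
    heap = []
    for value in (10, 70, 100, 1, 5):
        heapq.heappush(heap, value)  # O(log n) insert keeps the min at heap[0]
    assert heapq.heappop(heap) == 1  # O(log n) removal of the minimum


heapq_demo()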
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def __init__( self: Tuple , UpperCamelCase: Any , UpperCamelCase: List[Any]=7 , UpperCamelCase: List[str]=3 , UpperCamelCase: Dict=18 , UpperCamelCase: Union[str, Any]=30 , UpperCamelCase: Optional[Any]=4_00 , UpperCamelCase: Tuple=True , UpperCamelCase: List[str]=None , UpperCamelCase: int=True , UpperCamelCase: Optional[Any]=None , UpperCamelCase: Any=True , UpperCamelCase: Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] , UpperCamelCase: Dict=[0.26_862_954, 0.26_130_258, 0.27_577_711] , UpperCamelCase: Union[str, Any]=True , ) -> Any: snake_case__ = size if size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = crop_size if crop_size is not None else {'height': 18, 'width': 18} snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_normalize snake_case__ = image_mean snake_case__ = image_std snake_case__ = do_convert_rgb def lowerCAmelCase_ ( self: str ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any=False , UpperCamelCase: Any=False , UpperCamelCase: List[Any]=False ) -> Optional[int]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: snake_case__ = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: snake_case__ = [] for i in range(self.batch_size ): snake_case__ , snake_case__ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension snake_case__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs] if torchify: snake_case__ = [torch.from_numpy(UpperCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: snake_case__ = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase ) @property def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: snake_case__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCamelCase , 'size' ) ) 
self.assertTrue(hasattr(UpperCamelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(UpperCamelCase , 'center_crop' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_convert_rgb' ) ) def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple: snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: pass def lowerCAmelCase_ ( self: Dict ) -> int: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase_ ( self: Optional[int] ) -> List[str]: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self: List[str] ) -> int: snake_case__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase ) snake_case__ = 3 @property def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self: Tuple ) -> Tuple: snake_case__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCamelCase , 'size' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(UpperCamelCase , 'center_crop' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_convert_rgb' ) ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: pass def lowerCAmelCase_ ( self: Any ) -> List[Any]: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
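# The PIL / numpy / torch tests above all follow the same pattern: feed one input,
# check the un-batched shape, then feed the list and check the batched shape. A
# hypothetical way to run only this module (the path is an assumption about where
# the file lives in the repository):
#
#   python -m pytest tests/models/chinese_clip/test_image_processing_chinese_clip.py -v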
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["image_processor", "tokenizer"] _UpperCAmelCase = "LayoutLMv2ImageProcessor" _UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase , ) snake_case__ = kwargs.pop('feature_extractor' ) snake_case__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case__ = features['words'] snake_case__ = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) # add pixel values snake_case__ = features.pop('pixel_values' ) if return_overflowing_tokens is True: snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] ) snake_case__ = images return encoded_inputs def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' ) return images_with_overflow def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]: return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCAmelCase_ ( self: str ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCAmelCase_ ( self: Any ) -> List[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , ) return self.image_processor
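# Minimal usage sketch for the processor above (the checkpoint id follows the
# usual Hub convention and the image path is a placeholder):
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   # encoding holds input_ids, bbox, attention_mask and image tensors,
#   # matching model_input_names above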
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __UpperCamelCase : Optional[int] = logging.getLogger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "token-classification" def __init__( self: Union[str, Any] , UpperCamelCase: List[str] ) -> Tuple: if type(UpperCamelCase ) == dict: snake_case__ = Namespace(**UpperCamelCase ) snake_case__ = import_module('tasks' ) try: snake_case__ = getattr(UpperCamelCase , hparams.task_type ) snake_case__ = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) snake_case__ = self.token_classification_task.get_labels(hparams.labels ) snake_case__ = CrossEntropyLoss().ignore_index super().__init__(UpperCamelCase , len(self.labels ) , self.mode ) def lowerCAmelCase_ ( self: List[str] , **UpperCamelCase: Optional[int] ) -> Dict: return self.model(**UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: str ) -> Union[str, Any]: snake_case__ = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type != "distilbert": snake_case__ = ( batch[2] if self.config.model_type in ['bert', 'xlnet'] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case__ = self(**UpperCamelCase ) snake_case__ = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]: snake_case__ = self.hparams for mode in ["train", "dev", "test"]: snake_case__ = self._feature_file(UpperCamelCase ) if os.path.exists(UpperCamelCase ) and not args.overwrite_cache: logger.info('Loading features from cached file %s' , UpperCamelCase ) snake_case__ = torch.load(UpperCamelCase ) else: logger.info('Creating features from dataset file at %s' , args.data_dir ) snake_case__ = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase ) snake_case__ = self.token_classification_task.convert_examples_to_features( UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info('Saving features into cached file %s' , UpperCamelCase ) torch.save(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: bool = False ) -> DataLoader: snake_case__ = self._feature_file(UpperCamelCase ) logger.info('Loading features from cached file %s' , UpperCamelCase ) snake_case__ = torch.load(UpperCamelCase ) snake_case__ = torch.tensor([f.input_ids for f in 
features] , dtype=torch.long ) snake_case__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: snake_case__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: snake_case__ = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) snake_case__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , batch_size=UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: Tuple ) -> Any: """Compute validation""" "" snake_case__ = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if self.config.model_type != "distilbert": snake_case__ = ( batch[2] if self.config.model_type in ['bert', 'xlnet'] else None ) # XLM and RoBERTa don"t use token_type_ids snake_case__ = self(**UpperCamelCase ) snake_case__ , snake_case__ = outputs[:2] snake_case__ = logits.detach().cpu().numpy() snake_case__ = inputs['labels'].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Dict ) -> Dict: snake_case__ = torch.stack([x['val_loss'] for x in outputs] ).mean() snake_case__ = np.concatenate([x['pred'] for x in outputs] , axis=0 ) snake_case__ = np.argmax(UpperCamelCase , axis=2 ) snake_case__ = np.concatenate([x['target'] for x in outputs] , axis=0 ) snake_case__ = dict(enumerate(self.labels ) ) snake_case__ = [[] for _ in range(out_label_ids.shape[0] )] snake_case__ = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) snake_case__ = { 'val_loss': val_loss_mean, 'accuracy_score': accuracy_score(UpperCamelCase , UpperCamelCase ), 'precision': precision_score(UpperCamelCase , UpperCamelCase ), 'recall': recall_score(UpperCamelCase , UpperCamelCase ), 'f1': fa_score(UpperCamelCase , UpperCamelCase ), } snake_case__ = dict(results.items() ) snake_case__ = results return ret, preds_list, out_label_list def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[str] ) -> List[Any]: # when stable snake_case__ , snake_case__ , snake_case__ = self._eval_end(UpperCamelCase ) snake_case__ = ret['log'] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Tuple ) -> Optional[int]: # updating to test_epoch_end instead of deprecated test_end snake_case__ , snake_case__ , snake_case__ = self._eval_end(UpperCamelCase ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 snake_case__ = ret['log'] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCAmelCase_ ( UpperCamelCase: str , UpperCamelCase: Optional[Any] ) -> int: # Add NER specific options BaseTransformer.add_model_specific_args(UpperCamelCase , UpperCamelCase ) parser.add_argument( '--task_type' , default='NER' , type=UpperCamelCase , help='Task type to fine tune in training (e.g. 
NER, POS, etc)' ) parser.add_argument( '--max_seq_length' , default=1_28 , type=UpperCamelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--labels' , default='' , type=UpperCamelCase , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , ) parser.add_argument( '--gpus' , default=0 , type=UpperCamelCase , help='The number of GPUs allocated for this, it is by default 0 meaning none' , ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) return parser if __name__ == "__main__": __UpperCamelCase : Dict = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __UpperCamelCase : Optional[int] = NERTransformer.add_model_specific_args(parser, os.getcwd()) __UpperCamelCase : Optional[int] = parser.parse_args() __UpperCamelCase : Dict = NERTransformer(args) __UpperCamelCase : Tuple = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __UpperCamelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True)) __UpperCamelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
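# Hypothetical invocation of this script (argument values are placeholders; the
# generic flags such as --model_name_or_path and --output_dir are assumed to come
# from add_generic_args / BaseTransformer in lightning_base):
#
#   python run_ner.py \
#       --data_dir ./conll2003 \
#       --labels ./labels.txt \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output \
#       --max_seq_length 128 \
#       --do_train --do_predict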
def solution(limit: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
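# The scan above is O(n); the same answer follows in O(1) from the
# arithmetic-series formula (an independent sketch, not part of the original
# solution): the sum of multiples of k below n is k * m * (m + 1) / 2 with
# m = (n - 1) // k, and multiples of 15 are subtracted because they are counted
# under both 3 and 5.
def solution_closed_form(limit: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (limit - 1) // k
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form() == solution()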
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE: @staticmethod def lowerCAmelCase_ ( *UpperCamelCase: List[str] , **UpperCamelCase: Dict ) -> List[Any]: pass @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] ) -> int: snake_case__ = ObjectDetectionPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowerCAmelCase_ ( self: str , UpperCamelCase: Any , UpperCamelCase: List[Any] ) -> List[str]: snake_case__ = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 ) self.assertGreater(len(UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCamelCase , { 'score': ANY(UpperCamelCase ), 'label': ANY(UpperCamelCase ), 'box': {'xmin': ANY(UpperCamelCase ), 'ymin': ANY(UpperCamelCase ), 'xmax': ANY(UpperCamelCase ), 'ymax': ANY(UpperCamelCase )}, } , ) import datasets snake_case__ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) snake_case__ = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] snake_case__ = object_detector(UpperCamelCase , threshold=0.0 ) self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCamelCase , { 'score': ANY(UpperCamelCase ), 'label': ANY(UpperCamelCase ), 'box': {'xmin': ANY(UpperCamelCase ), 'ymin': ANY(UpperCamelCase ), 'xmax': ANY(UpperCamelCase ), 'ymax': ANY(UpperCamelCase )}, } , ) @require_tf @unittest.skip('Object detection not implemented in TF' ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[Any]: pass @require_torch def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = 'hf-internal-testing/tiny-detr-mobilenetsv3' snake_case__ = AutoModelForObjectDetection.from_pretrained(UpperCamelCase ) snake_case__ = AutoFeatureExtractor.from_pretrained(UpperCamelCase ) snake_case__ = ObjectDetectionPipeline(model=UpperCamelCase , feature_extractor=UpperCamelCase ) snake_case__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ] , ) snake_case__ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ [ {'score': 0.3_376, 'label': 'LABEL_0', 
'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ], [ {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ], ] , ) @require_torch @slow def lowerCAmelCase_ ( self: str ) -> List[str]: snake_case__ = 'facebook/detr-resnet-50' snake_case__ = AutoModelForObjectDetection.from_pretrained(UpperCamelCase ) snake_case__ = AutoFeatureExtractor.from_pretrained(UpperCamelCase ) snake_case__ = ObjectDetectionPipeline(model=UpperCamelCase , feature_extractor=UpperCamelCase ) snake_case__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) snake_case__ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ [ {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], [ {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], ] , ) @require_torch @slow def lowerCAmelCase_ ( self: Dict ) -> Tuple: snake_case__ = 'facebook/detr-resnet-50' snake_case__ = pipeline('object-detection' , model=UpperCamelCase ) snake_case__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) snake_case__ = object_detector( [ 
'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ [ {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], [ {'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], ] , ) @require_torch @slow def lowerCAmelCase_ ( self: List[Any] ) -> List[str]: snake_case__ = 0.9_985 snake_case__ = 'facebook/detr-resnet-50' snake_case__ = pipeline('object-detection' , model=UpperCamelCase ) snake_case__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=UpperCamelCase ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) @require_torch @require_pytesseract @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case__ = 'Narsil/layoutlmv3-finetuned-funsd' snake_case__ = 0.9_993 snake_case__ = pipeline('object-detection' , model=UpperCamelCase , threshold=UpperCamelCase ) snake_case__ = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}}, {'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}}, ] , )
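# Outside the test harness, the same checkpoint is exercised with a few lines
# (a sketch; the image URL is the COCO sample reused throughout the tests):
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
#   )
#   # -> a list of {"score", "label", "box"} dicts, as asserted above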
import os


def solution() -> str:
    """Return the first ten digits of the sum of the integers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
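# An equivalent sketch using pathlib (same behaviour, assuming num.txt sits next
# to this file and holds one large integer per line):
from pathlib import Path


def solution_pathlib() -> str:
    lines = (Path(__file__).parent / "num.txt").read_text().splitlines()
    return str(sum(int(line) for line in lines))[:10]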
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : List[Any] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = ["""YolosFeatureExtractor"""] __UpperCamelCase : Tuple = ["""YolosImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = [ """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""", """YolosForObjectDetection""", """YolosModel""", """YolosPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
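# Effect of the lazy structure above (a sketch, assuming this file is the
# package's __init__.py): importing the package stays cheap, and the heavy
# torch-backed symbols are only resolved on first attribute access:
#
#   from transformers.models import yolos      # no torch import happens here
#   model_cls = yolos.YolosForObjectDetection  # triggers the real import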
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    "Context manager to hide the terminal cursor"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
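# Usage sketch for the context manager above (the body is illustrative):
#
#   with hide():
#       run_interactive_menu()  # hypothetical long-running UI code
#   # the cursor is restored even if the body raises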
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __SCREAMING_SNAKE_CASE( unittest.TestCase ): _UpperCAmelCase = inspect.getfile(accelerate.test_utils ) _UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] ) _UpperCAmelCase = ["accelerate", "launch"] _UpperCAmelCase = Path.home() / ".cache/huggingface/accelerate" _UpperCAmelCase = "default_config.yaml" _UpperCAmelCase = config_folder / config_file _UpperCAmelCase = config_folder / "_default_config.yaml" _UpperCAmelCase = Path("tests/test_configs" ) @classmethod def lowerCAmelCase_ ( cls: Tuple ) -> List[str]: if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCAmelCase_ ( cls: Tuple ) -> Tuple: if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: for config in sorted(self.test_config_path.glob('**/*.yaml' ) ): with self.subTest(config_file=UpperCamelCase ): execute_subprocess_async( self.base_cmd + ['--config_file', str(UpperCamelCase ), self.test_file_path] , env=os.environ.copy() ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() ) class __SCREAMING_SNAKE_CASE( unittest.TestCase ): _UpperCAmelCase = "test-tpu" _UpperCAmelCase = "us-central1-a" _UpperCAmelCase = "ls" _UpperCAmelCase = ["accelerate", "tpu-config"] _UpperCAmelCase = "cd /usr/share" _UpperCAmelCase = "tests/test_samples/test_command_file.sh" _UpperCAmelCase = "Running gcloud compute tpus tpu-vm ssh" def lowerCAmelCase_ ( self: Any ) -> Optional[Any]: snake_case__ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: str ) -> Optional[Any]: snake_case__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple: snake_case__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=UpperCamelCase ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: Dict ) -> Dict: snake_case__ = run_command( self.cmd + 
[ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo "Hello World"', '--debug', ] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: List[Any] ) -> Dict: snake_case__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: snake_case__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: Any ) -> List[str]: snake_case__ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase , ) def lowerCAmelCase_ ( self: int ) -> Any: snake_case__ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=UpperCamelCase , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , UpperCamelCase , )
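# The tests above all drive the same CLI; a representative command they exercise
# (flags mirror the test arguments, values are illustrative):
#
#   accelerate tpu-config \
#       --config_file tests/test_configs/latest.yaml \
#       --command "ls" \
#       --install_accelerate --debug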
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Any = { """7B""": 11008, """13B""": 13824, """30B""": 17920, """65B""": 22016, """70B""": 28672, } __UpperCamelCase : Optional[Any] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def a_ ( _A , _A=1 , _A=256 ) -> str: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def a_ ( _A ) -> int: """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def a_ ( _A , _A ) -> int: """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def a_ ( _A , _A , _A , _A=True ) -> List[str]: """simple docstring""" os.makedirs(_A , exist_ok=_A ) snake_case__ = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) snake_case__ = read_json(os.path.join(_A , 'params.json' ) ) snake_case__ = NUM_SHARDS[model_size] snake_case__ = params['n_layers'] snake_case__ = params['n_heads'] snake_case__ = n_heads // num_shards snake_case__ = params['dim'] snake_case__ = dim // n_heads snake_case__ = 10000.0 snake_case__ = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: snake_case__ = params['n_kv_heads'] # for GQA / MQA snake_case__ = n_heads_per_shard // num_key_value_heads snake_case__ = dim // num_key_value_heads else: # compatibility with other checkpoints snake_case__ = n_heads snake_case__ = n_heads_per_shard snake_case__ = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
snake_case__ = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded snake_case__ = [ torch.load(os.path.join(_A , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' ) for i in range(_A ) ] snake_case__ = 0 snake_case__ = {'weight_map': {}} for layer_i in range(_A ): snake_case__ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. snake_case__ = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) snake_case__ = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = inv_freq for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) snake_case__ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: snake_case__ = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( 
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs snake_case__ = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) snake_case__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 snake_case__ = params['multiple_of'] if 'multiple_of' in params else 256 snake_case__ = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) snake_case__ = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def a_ ( _A , _A ) -> Tuple: """simple docstring""" # Initialize the tokenizer based on the `spm` model snake_case__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) snake_case__ = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def a_ ( ) -> str: """simple docstring""" snake_case__ = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) snake_case__ = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) snake_case__ = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
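# Illustrative invocation of the conversion entry point above (script and
# directory names are assumptions; the input directory must contain
# tokenizer.model plus one folder per model size):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf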
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS:
    at each index the element is first excluded, then included, and every
    complete subsequence is printed at the leaves.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
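# Trace of the exclude-then-include recursion above for a two-element input:
# generate_all_subsequences([1, 2]) prints, in order:
#   []
#   [2]
#   [1]
#   [1, 2]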
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
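# Usage sketch for the reader above (an illustrative event loop; raw-mode input
# means this should run in a real terminal, and get_character may return either
# a str or KEYMAP["undefined"], which is an int):
#
#   while True:
#       key = get_character()
#       if isinstance(key, str) and ord(key) == KEYMAP["interrupt"]:  # Ctrl-C
#           break
#       if isinstance(key, str) and ord(key) in (KEYMAP["up"], KEYMAP["down"]):
#           move_selection(key)  # hypothetical handler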
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __UpperCamelCase : Union[str, Any] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys __UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "gptsan-japanese" _UpperCAmelCase = [ "past_key_values", ] _UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Optional[Any] , UpperCamelCase: List[str]=3_60_00 , UpperCamelCase: List[str]=12_80 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Any=81_92 , UpperCamelCase: Dict=40_96 , UpperCamelCase: Optional[int]=1_28 , UpperCamelCase: Any=10 , UpperCamelCase: List[Any]=0 , UpperCamelCase: Dict=16 , UpperCamelCase: Tuple=16 , UpperCamelCase: Union[str, Any]=1_28 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: Union[str, Any]=1e-5 , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Dict="float32" , UpperCamelCase: Any=False , UpperCamelCase: Dict=False , UpperCamelCase: List[str]=False , UpperCamelCase: Union[str, Any]=0.002 , UpperCamelCase: int=False , UpperCamelCase: str=True , UpperCamelCase: Dict=3_59_98 , UpperCamelCase: Optional[Any]=3_59_95 , UpperCamelCase: Optional[Any]=3_59_99 , **UpperCamelCase: Optional[int] , ) -> Optional[int]: snake_case__ = vocab_size snake_case__ = max_position_embeddings snake_case__ = d_model snake_case__ = d_ff snake_case__ = d_ext snake_case__ = d_spout snake_case__ = num_switch_layers snake_case__ = num_ext_layers snake_case__ = num_switch_layers + num_ext_layers snake_case__ = num_heads snake_case__ = num_experts snake_case__ = expert_capacity snake_case__ = dropout_rate snake_case__ = layer_norm_epsilon snake_case__ = router_bias snake_case__ = router_jitter_noise snake_case__ = router_dtype snake_case__ = router_ignore_padding_tokens snake_case__ = output_hidden_states snake_case__ = output_attentions snake_case__ = initializer_factor snake_case__ = output_router_logits snake_case__ = use_cache super().__init__( separator_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
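# Usage sketch (added; not in the original file): the upstream class in transformers is
# `GPTSanJapaneseConfig` -- the class name above was mangled by this dump, so the import
# below is an assumption about the installed library, not about this file.
if __name__ == "__main__":
    from transformers import GPTSanJapaneseConfig

    config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=2, num_experts=4)
    print(config.num_layers)  # num_switch_layers + num_ext_layers == 2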
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
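# Usage sketch (added; not part of the original file): a bare reverse-diffusion loop.
# The installed class is importable as `diffusers.ScoreSdeVpScheduler`; the zero score
# below is a placeholder for a real score network's output.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=50)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(sample)  # placeholder score estimate
        sample, sample_mean = scheduler.step_pred(score, sample, t)
    print(sample.shape)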
from math import sqrt

import numpy as np
from sympy import symbols


# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = v / c, validating the velocity."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to an event four-vector (symbolic if none is given)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
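# Added numeric check (not in the original file): a boost must preserve the Minkowski
# interval ct^2 - x^2 - y^2 - z^2; quick sanity test at half light speed.
if __name__ == "__main__":
    def interval(e):
        return e[0] ** 2 - e[1] ** 2 - e[2] ** 2 - e[3] ** 2

    event = np.array([2.0, 1.0, 0.0, 0.0])  # (ct, x, y, z), ct already multiplied out
    boosted = transformation_matrix(0.5 * c) @ event
    print(interval(event), interval(boosted))  # both ~= 3.0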
from ..utils import DummyObject, requires_backends


# NOTE: the original class name was lost in this dump; the placeholder name below is an
# assumption. Dummy objects like this raise a helpful error when the `torch` and `scipy`
# backends are missing, and the two classmethod names follow the usual dummy-object pattern.
class TorchAndScipyDummyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70, "T": 9.06, "A": 8.17, "O": 7.51, "I": 6.97, "N": 6.75,
    "S": 6.33, "H": 6.09, "R": 5.99, "D": 4.25, "L": 4.03, "C": 2.78,
    "U": 2.76, "M": 2.41, "W": 2.36, "F": 2.23, "G": 2.02, "Y": 1.97,
    "P": 1.93, "B": 1.29, "V": 0.98, "K": 0.77, "J": 0.15, "X": 0.15,
    "Q": 0.10, "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
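# Usage sketch (added; not in the original file): ordinary English prose typically
# scores near the maximum of 12 on the frequency match score.
if __name__ == "__main__":
    sample = (
        "Alan Mathison Turing was an English mathematician and computer scientist "
        "widely considered to be the father of theoretical computer science."
    )
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))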
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : int = {"""vocab_file""": """spiece.model"""} __UpperCamelCase : Any = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", } } # TODO(PVP) - this should be removed in Transformers v5 __UpperCamelCase : Tuple = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } __UpperCamelCase : Optional[Any] = """▁""" class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) snake_case__ = legacy snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , ) snake_case__ = vocab_file snake_case__ = extra_ids snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) @staticmethod def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any: if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , ) return max_model_length @property def lowerCAmelCase_ ( self: Tuple ) -> List[str]: return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase )) + [1] return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return list( set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()] def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[int] ) -> List[int]: if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) if token_ids_a is None: return token_ids_a else: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self: Union[str, Any] ) -> List[str]: snake_case__ = self.__dict__.copy() snake_case__ = None return state def __setstate__( self: Optional[int] , UpperCamelCase: int ) -> List[str]: snake_case__ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ = {} snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self: str , UpperCamelCase: "TextInput" , **UpperCamelCase: Dict ) -> List[str]: # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: snake_case__ = SPIECE_UNDERLINE + text.replace(UpperCamelCase , ' ' ) return super().tokenize(UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , **UpperCamelCase: str ) -> str: if not self.legacy: snake_case__ = text.startswith(UpperCamelCase ) if is_first: snake_case__ = text[1:] snake_case__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(UpperCamelCase ): snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] ) -> Dict: if token.startswith('<extra_id_' ): snake_case__ = re.match(R'<extra_id_(\d+)>' , UpperCamelCase ) snake_case__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCamelCase ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> Tuple: if index < self.sp_model.get_piece_size(): snake_case__ = self.sp_model.IdToPiece(UpperCamelCase ) else: snake_case__ = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> Dict: snake_case__ = [] snake_case__ = '' snake_case__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase ) + token snake_case__ = True snake_case__ = [] else: current_sub_tokens.append(UpperCamelCase ) snake_case__ = False out_string += self.sp_model.decode(UpperCamelCase ) return out_string.strip() def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ = os.path.join( UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) 
elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , 'wb' ) as fi: snake_case__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,)
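# Usage sketch (added): the class above is transformers' `T5Tokenizer` (its name was
# mangled in this dump). Fetching the checkpoint requires network access.
if __name__ == "__main__":
    from transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ids = tokenizer("translate English to German: Hello, world!").input_ids
    print(tokenizer.convert_ids_to_tokens(ids))  # ends with the </s> eos token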
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @require_torch def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case__ = pipeline( task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' ) snake_case__ = load_dataset('ashraq/esc50' ) snake_case__ = dataset['train']['audio'][-1]['array'] snake_case__ = audio_classifier(UpperCamelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(UpperCamelCase ) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , ) @unittest.skip('No models are available in TF' ) def lowerCAmelCase_ ( self: Any ) -> Union[str, Any]: pass @slow @require_torch def lowerCAmelCase_ ( self: List[Any] ) -> Tuple: snake_case__ = pipeline( task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , ) # This is an audio of a dog snake_case__ = load_dataset('ashraq/esc50' ) snake_case__ = dataset['train']['audio'][-1]['array'] snake_case__ = audio_classifier(UpperCamelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(UpperCamelCase ) , [ {'score': 0.999, 'label': 'Sound of a dog'}, {'score': 0.001, 'label': 'Sound of vaccum cleaner'}, ] , ) snake_case__ = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(UpperCamelCase ) , [ [ {'score': 0.999, 'label': 'Sound of a dog'}, {'score': 0.001, 'label': 'Sound of vaccum cleaner'}, ], ] * 5 , ) snake_case__ = audio_classifier( [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 ) self.assertEqual( nested_simplify(UpperCamelCase ) , [ [ {'score': 0.999, 'label': 'Sound of a dog'}, {'score': 0.001, 'label': 'Sound of vaccum cleaner'}, ], ] * 5 , ) @unittest.skip('No models are available in TF' ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]: pass
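# Usage sketch (added): the same pipeline outside the test harness, fed a raw waveform
# (a 1-D float32 numpy array). The zero array is a stand-in for a real recording, and
# downloading the model requires network access.
if __name__ == "__main__":
    import numpy as np
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    waveform = np.zeros(48_000, dtype=np.float32)
    print(classifier(waveform, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))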
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __SCREAMING_SNAKE_CASE: def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: List[str] ) -> Dict: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None if self.use_token_type_ids: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: 
List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict: snake_case__ = LlamaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) snake_case__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str: snake_case__ = True snake_case__ = LlamaModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , ) snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any: snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]: snake_case__ = True snake_case__ = True snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # first forward pass snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , ) snake_case__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] # select random slice snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ = output_from_past[:, :, 
random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) ) def lowerCAmelCase_ ( self: int ) -> Dict: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ): _UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else () _UpperCAmelCase = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = LlamaModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'single_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'multi_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def lowerCAmelCase_ ( self: Dict ) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = ids_tensor([1, 10] , config.vocab_size ) snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = LlamaModel(UpperCamelCase ) original_model.to(UpperCamelCase ) original_model.eval() snake_case__ = original_model(UpperCamelCase ).last_hidden_state snake_case__ = original_model(UpperCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = {'type': scaling_type, 'factor': 10.0} snake_case__ = LlamaModel(UpperCamelCase ) scaled_model.to(UpperCamelCase ) scaled_model.eval() snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: int ) -> List[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) snake_case__ = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # fmt: off snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]: snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' snake_case__ = 'Simply put, the theory of relativity states that ' snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' ) snake_case__ = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase ) # greedy generation outputs snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase ) snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase )
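# Usage sketch (added): greedy generation with the classes exercised in the tests above.
# The gated checkpoint name is taken from the tests and needs authenticated access; a
# 7B model is heavyweight to run locally.
if __name__ == "__main__":
    from transformers import LlamaForCausalLM, LlamaTokenizer

    tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
    model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
    inputs = tokenizer("Simply put, the theory of relativity states that ", return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=32, do_sample=False)
    print(tokenizer.decode(output[0], skip_special_tokens=True))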
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = (DDPMScheduler,) def lowerCAmelCase_ ( self: int , **UpperCamelCase: Union[str, Any] ) -> int: snake_case__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**UpperCamelCase ) return config def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase , beta_end=UpperCamelCase ) def lowerCAmelCase_ ( self: str ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Any: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase ) def lowerCAmelCase_ ( self: int ) -> Any: for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: self.check_over_configs(thresholding=UpperCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase , prediction_type=UpperCamelCase , sample_max_value=UpperCamelCase , ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Any: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple ) -> int: for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5 def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) snake_case__ = len(UpperCamelCase ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter snake_case__ = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase ) ): # 1. predict noise residual snake_case__ = model(UpperCamelCase , UpperCamelCase ) # 2. 
predict previous mean of sample x_t-1 snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance snake_case__ = pred_prev_sample snake_case__ = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1e-2 assert abs(result_mean.item() - 0.3_372 ) < 1e-3 def lowerCAmelCase_ ( self: Any ) -> Optional[int]: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config(prediction_type='v_prediction' ) snake_case__ = scheduler_class(**UpperCamelCase ) snake_case__ = len(UpperCamelCase ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter snake_case__ = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase ) ): # 1. predict noise residual snake_case__ = model(UpperCamelCase , UpperCamelCase ) # 2. predict previous mean of sample x_t-1 snake_case__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance snake_case__ = pred_prev_sample snake_case__ = torch.sum(torch.abs(UpperCamelCase ) ) snake_case__ = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1e-2 assert abs(result_mean.item() - 0.2_631 ) < 1e-3 def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) snake_case__ = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase ) snake_case__ = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase ): if i == len(UpperCamelCase ) - 1: snake_case__ = -1 else: snake_case__ = timesteps[i + 1] snake_case__ = scheduler.previous_timestep(UpperCamelCase ) snake_case__ = prev_t.item() self.assertEqual(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) snake_case__ = [1_00, 87, 50, 51, 0] with self.assertRaises(UpperCamelCase , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple ) -> Optional[Any]: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) snake_case__ = [1_00, 87, 50, 1, 0] snake_case__ = len(UpperCamelCase ) with self.assertRaises(UpperCamelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> List[Any]: snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**UpperCamelCase ) snake_case__ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=UpperCamelCase )
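# Usage sketch (added): driving the scheduler under test directly, with a zero tensor
# standing in for a UNet's noise prediction.
if __name__ == "__main__":
    import torch
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a model's prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)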
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number in lst (1-indexed)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
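# Usage sketch (added; not in the original file): quickselect returns the kth smallest
# element in expected linear time without fully sorting the list.
if __name__ == "__main__":
    data = [7, 2, 9, 4, 1, 8]
    print(kth_number(data, 1))  # 1 (the smallest)
    print(kth_number(data, 4))  # 7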
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
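# Usage sketch (added): end users normally reach this formatter through
# `Dataset.with_format("torch")` rather than instantiating it directly.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch")
    print(ds[0])  # {'x': tensor([1, 2]), 'y': tensor(0)}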
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = 42 _UpperCAmelCase = jnp.floataa _UpperCAmelCase = True def lowerCAmelCase_ ( self: Any ) -> Optional[int]: super().setup() snake_case__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self: Union[str, Any] , *UpperCamelCase: Optional[int] , **UpperCamelCase: Union[str, Any] ) -> str: snake_case__ = super().__call__(*UpperCamelCase , **UpperCamelCase ) snake_case__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = FlaxBigBirdForNaturalQuestionsModule def a_ ( _A , _A , _A , _A , _A , _A ) -> List[str]: """simple docstring""" def cross_entropy(_A , _A , _A=None ): snake_case__ = logits.shape[-1] snake_case__ = (labels[..., None] == jnp.arange(_A )[None]).astype('f4' ) snake_case__ = jax.nn.log_softmax(_A , axis=-1 ) snake_case__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: snake_case__ = reduction(_A ) return loss snake_case__ = partial(_A , reduction=jnp.mean ) snake_case__ = cross_entropy(_A , _A ) snake_case__ = cross_entropy(_A , _A ) snake_case__ = cross_entropy(_A , _A ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __SCREAMING_SNAKE_CASE: _UpperCAmelCase = "google/bigbird-roberta-base" _UpperCAmelCase = 3_0_0_0 _UpperCAmelCase = 1_0_5_0_0 _UpperCAmelCase = 1_2_8 _UpperCAmelCase = 3 _UpperCAmelCase = 1 _UpperCAmelCase = 5 # tx_args _UpperCAmelCase = 3E-5 _UpperCAmelCase = 0.0 _UpperCAmelCase = 2_0_0_0_0 _UpperCAmelCase = 0.0_0_9_5 _UpperCAmelCase = "bigbird-roberta-natural-questions" _UpperCAmelCase = "training-expt" _UpperCAmelCase = "data/nq-training.jsonl" _UpperCAmelCase = "data/nq-validation.jsonl" def lowerCAmelCase_ ( self: Dict ) -> int: os.makedirs(self.base_dir , exist_ok=UpperCamelCase ) snake_case__ = os.path.join(self.base_dir , self.save_dir ) snake_case__ = self.batch_size_per_device * jax.device_count() @dataclass class __SCREAMING_SNAKE_CASE: _UpperCAmelCase = 42 _UpperCAmelCase = 4_0_9_6 # no dynamic padding on TPUs def __call__( self: Optional[Any] , UpperCamelCase: Optional[Any] ) -> str: snake_case__ = self.collate_fn(UpperCamelCase ) snake_case__ = jax.tree_util.tree_map(UpperCamelCase , UpperCamelCase ) return batch def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Optional[int] ) -> Tuple: snake_case__ , snake_case__ = self.fetch_inputs(features['input_ids'] ) snake_case__ = { 'input_ids': jnp.array(UpperCamelCase , dtype=jnp.intaa ), 'attention_mask': jnp.array(UpperCamelCase , dtype=jnp.intaa ), 'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa ), 'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa ), 'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa ), } return batch def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: list ) -> List[Any]: snake_case__ = [self._fetch_inputs(UpperCamelCase ) for ids in input_ids] return zip(*UpperCamelCase ) def lowerCAmelCase_ ( self: 
Union[str, Any] , UpperCamelCase: list ) -> List[str]: snake_case__ = [1 for _ in range(len(UpperCamelCase ) )] while len(UpperCamelCase ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def a_ ( _A , _A , _A=None ) -> Union[str, Any]: """simple docstring""" if seed is not None: snake_case__ = dataset.shuffle(seed=_A ) for i in range(len(_A ) // batch_size ): snake_case__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(_A ) @partial(jax.pmap , axis_name='batch' ) def a_ ( _A , _A , **_A ) -> int: """simple docstring""" def loss_fn(_A ): snake_case__ = model_inputs.pop('start_labels' ) snake_case__ = model_inputs.pop('end_labels' ) snake_case__ = model_inputs.pop('pooled_labels' ) snake_case__ = state.apply_fn(**_A , params=_A , dropout_rng=_A , train=_A ) snake_case__ , snake_case__ , snake_case__ = outputs return state.loss_fn( _A , _A , _A , _A , _A , _A , ) snake_case__ , snake_case__ = jax.random.split(_A ) snake_case__ = jax.value_and_grad(_A ) snake_case__ , snake_case__ = grad_fn(state.params ) snake_case__ = jax.lax.pmean({'loss': loss} , axis_name='batch' ) snake_case__ = jax.lax.pmean(_A , 'batch' ) snake_case__ = state.apply_gradients(grads=_A ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='batch' ) def a_ ( _A , **_A ) -> Any: """simple docstring""" snake_case__ = model_inputs.pop('start_labels' ) snake_case__ = model_inputs.pop('end_labels' ) snake_case__ = model_inputs.pop('pooled_labels' ) snake_case__ = state.apply_fn(**_A , params=state.params , train=_A ) snake_case__ , snake_case__ , snake_case__ = outputs snake_case__ = state.loss_fn(_A , _A , _A , _A , _A , _A ) snake_case__ = jax.lax.pmean({'loss': loss} , axis_name='batch' ) return metrics class __SCREAMING_SNAKE_CASE( train_state.TrainState ): _UpperCAmelCase = struct.field(pytree_node=a_ ) @dataclass class __SCREAMING_SNAKE_CASE: _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = None def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str]=None ) -> Union[str, Any]: snake_case__ = model.params snake_case__ = TrainState.create( apply_fn=model.__call__ , params=UpperCamelCase , tx=UpperCamelCase , loss_fn=UpperCamelCase , ) if ckpt_dir is not None: snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = restore_checkpoint(UpperCamelCase , UpperCamelCase ) snake_case__ = { 'lr': args.lr, 'init_lr': args.init_lr, 'warmup_steps': args.warmup_steps, 'num_train_steps': num_train_steps, 'weight_decay': args.weight_decay, } snake_case__ , snake_case__ = build_tx(**UpperCamelCase ) snake_case__ = train_state.TrainState( step=UpperCamelCase , apply_fn=model.__call__ , params=UpperCamelCase , tx=UpperCamelCase , opt_state=UpperCamelCase , ) snake_case__ = args snake_case__ = data_collator snake_case__ = lr snake_case__ = params snake_case__ = jax_utils.replicate(UpperCamelCase ) return state def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: int ) -> str: snake_case__ = self.args snake_case__ = len(UpperCamelCase ) // args.batch_size snake_case__ = jax.random.PRNGKey(0 ) snake_case__ = jax.random.split(UpperCamelCase , jax.device_count() ) for epoch in range(args.max_epochs ): snake_case__ = jnp.array(0 , dtype=jnp.floataa ) snake_case__ = 
get_batched_dataset(UpperCamelCase , args.batch_size , seed=UpperCamelCase ) snake_case__ = 0 for batch in tqdm(UpperCamelCase , total=UpperCamelCase , desc=F'''Running EPOCH-{epoch}''' ): snake_case__ = self.data_collator(UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ = self.train_step_fn(UpperCamelCase , UpperCamelCase , **UpperCamelCase ) running_loss += jax_utils.unreplicate(metrics['loss'] ) i += 1 if i % args.logging_steps == 0: snake_case__ = jax_utils.unreplicate(state.step ) snake_case__ = running_loss.item() / i snake_case__ = self.scheduler_fn(state_step - 1 ) snake_case__ = self.evaluate(UpperCamelCase , UpperCamelCase ) snake_case__ = { 'step': state_step.item(), 'eval_loss': eval_loss.item(), 'tr_loss': tr_loss, 'lr': lr.item(), } tqdm.write(str(UpperCamelCase ) ) self.logger.log(UpperCamelCase , commit=UpperCamelCase ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] ) -> str: snake_case__ = get_batched_dataset(UpperCamelCase , self.args.batch_size ) snake_case__ = len(UpperCamelCase ) // self.args.batch_size snake_case__ = jnp.array(0 , dtype=jnp.floataa ) snake_case__ = 0 for batch in tqdm(UpperCamelCase , total=UpperCamelCase , desc='Evaluating ... ' ): snake_case__ = self.data_collator(UpperCamelCase ) snake_case__ = self.val_step_fn(UpperCamelCase , **UpperCamelCase ) running_loss += jax_utils.unreplicate(metrics['loss'] ) i += 1 return running_loss / i def lowerCAmelCase_ ( self: Any , UpperCamelCase: str , UpperCamelCase: Dict ) -> str: snake_case__ = jax_utils.unreplicate(UpperCamelCase ) print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=' ... ' ) self.model_save_fn(UpperCamelCase , params=state.params ) with open(os.path.join(UpperCamelCase , 'opt_state.msgpack' ) , 'wb' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(UpperCamelCase , 'args.joblib' ) ) joblib.dump(self.data_collator , os.path.join(UpperCamelCase , 'data_collator.joblib' ) ) with open(os.path.join(UpperCamelCase , 'training_state.json' ) , 'w' ) as f: json.dump({'step': state.step.item()} , UpperCamelCase ) print('DONE' ) def a_ ( _A , _A ) -> Any: """simple docstring""" print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... 
' ) with open(os.path.join(_A , 'flax_model.msgpack' ) , 'rb' ) as f: snake_case__ = from_bytes(state.params , f.read() ) with open(os.path.join(_A , 'opt_state.msgpack' ) , 'rb' ) as f: snake_case__ = from_bytes(state.opt_state , f.read() ) snake_case__ = joblib.load(os.path.join(_A , 'args.joblib' ) ) snake_case__ = joblib.load(os.path.join(_A , 'data_collator.joblib' ) ) with open(os.path.join(_A , 'training_state.json' ) , 'r' ) as f: snake_case__ = json.load(_A ) snake_case__ = training_state['step'] print('DONE' ) return params, opt_state, step, args, data_collator def a_ ( _A , _A , _A , _A ) -> Union[str, Any]: """simple docstring""" snake_case__ = num_train_steps - warmup_steps snake_case__ = optax.linear_schedule(init_value=_A , end_value=_A , transition_steps=_A ) snake_case__ = optax.linear_schedule(init_value=_A , end_value=1e-7 , transition_steps=_A ) snake_case__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def a_ ( _A , _A , _A , _A , _A ) -> List[Any]: """simple docstring""" def weight_decay_mask(_A ): snake_case__ = traverse_util.flatten_dict(_A ) snake_case__ = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()} return traverse_util.unflatten_dict(_A ) snake_case__ = scheduler_fn(_A , _A , _A , _A ) snake_case__ = optax.adamw(learning_rate=_A , weight_decay=_A , mask=_A ) return tx, lr
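# --- Hedged usage sketch (not part of the original script) ---
# A self-contained check of the warmup+decay learning-rate schedule that the
# scheduler helper above assembles with optax.join_schedules over two linear
# ramps. The step counts and peak learning rate below are illustrative
# assumptions, not the script's actual defaults.
import optax

warmup_steps, num_train_steps = 2_000, 10_000
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=num_train_steps - warmup_steps)
lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
# ramps 0.0 -> 3e-5 over the warmup, then decays linearly to 1e-7
print(lr_schedule(0), lr_schedule(warmup_steps), lr_schedule(num_train_steps))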
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]

        # pad the smaller signal with zeros so both signals have the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated right by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # round off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
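# --- Usage check (not part of the original file) ---
# A quick sanity check of the class above; the expected values follow from the
# definition of circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4].
convolver = CircularConvolution()
print(convolver.circular_convolution())  # expected [10.0, 10.0, 6.0, 14.0]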
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __UpperCamelCase : str = trt.Logger(trt.Logger.WARNING) __UpperCamelCase : List[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __UpperCamelCase : Tuple = logging.getLogger(__name__) __UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=384, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=128, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) __UpperCamelCase : Optional[Any] = parser.parse_args() if args.tokenizer_name: __UpperCamelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) __UpperCamelCase : Dict = args.per_device_eval_batch_size __UpperCamelCase : Optional[int] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __UpperCamelCase : Optional[int] = True __UpperCamelCase : Tuple = """temp_engine/bert-fp32.engine""" if args.fpaa: __UpperCamelCase : Union[str, Any] = """temp_engine/bert-fp16.engine""" if args.inta: __UpperCamelCase : List[str] = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") __UpperCamelCase : Dict = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __UpperCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)] __UpperCamelCase : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __UpperCamelCase : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __UpperCamelCase : List[Any] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __UpperCamelCase : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def a_ ( _A , _A , _A , _A , _A , _A , _A , _A ) -> Optional[int]: """simple docstring""" snake_case__ = np.asarray(inputs['input_ids'] , dtype=np.intaa ) snake_case__ = np.asarray(inputs['attention_mask'] , dtype=np.intaa ) snake_case__ = 
np.asarray(inputs['token_type_ids'] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _A ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _A ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _A ) # start time snake_case__ = time.time() # Run inference context.execute_async( bindings=[int(_A ) for d_inp in d_inputs] + [int(_A ), int(_A )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_A , _A , _A ) cuda.memcpy_dtoh_async(_A , _A , _A ) # Synchronize the stream and take time stream.synchronize() # end time snake_case__ = time.time() snake_case__ = end_time - start_time snake_case__ = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __UpperCamelCase : int = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __UpperCamelCase : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __UpperCamelCase : Dict = raw_datasets["""validation"""].column_names __UpperCamelCase : List[str] = """question""" if """question""" in column_names else column_names[0] __UpperCamelCase : Any = """context""" if """context""" in column_names else column_names[1] __UpperCamelCase : Optional[Any] = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __UpperCamelCase : Any = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.''' ) __UpperCamelCase : Optional[int] = min(args.max_seq_length, tokenizer.model_max_length) def a_ ( _A ) -> int: """simple docstring""" # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace snake_case__ = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. snake_case__ = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=_A , stride=args.doc_stride , return_overflowing_tokens=_A , return_offsets_mapping=_A , padding='max_length' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. snake_case__ = tokenized_examples.pop('overflow_to_sample_mapping' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. snake_case__ = [] for i in range(len(tokenized_examples['input_ids'] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). snake_case__ = tokenized_examples.sequence_ids(_A ) snake_case__ = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. snake_case__ = sample_mapping[i] tokenized_examples["example_id"].append(examples['id'][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. snake_case__ = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['offset_mapping'][i] ) ] return tokenized_examples __UpperCamelCase : List[str] = raw_datasets["""validation"""] # Validation Feature Creation __UpperCamelCase : List[Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) __UpperCamelCase : Optional[Any] = default_data_collator __UpperCamelCase : Any = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) __UpperCamelCase : int = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def a_ ( _A , _A , _A , _A="eval" ) -> str: """simple docstring""" # Post-processing: we match the start logits and end logits to answers in the original context. snake_case__ = postprocess_qa_predictions( examples=_A , features=_A , predictions=_A , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_A , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: snake_case__ = [ {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items() ] else: snake_case__ = [{'id': k, 'prediction_text': v} for k, v in predictions.items()] snake_case__ = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_A , label_ids=_A ) __UpperCamelCase : Union[str, Any] = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def a_ ( _A ) -> int: """simple docstring""" return trt.volume(engine.get_binding_shape(_A ) ) * engine.get_binding_dtype(_A ).itemsize # Allocate device memory for inputs and outputs. __UpperCamelCase : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __UpperCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __UpperCamelCase : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __UpperCamelCase : str = cuda.mem_alloc(h_outputa.nbytes) __UpperCamelCase : List[Any] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __UpperCamelCase : Tuple = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __UpperCamelCase : Optional[int] = 0.0 __UpperCamelCase : Tuple = 0 __UpperCamelCase : str = timeit.default_timer() __UpperCamelCase : str = None for step, batch in enumerate(eval_dataloader): __UpperCamelCase , __UpperCamelCase : List[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __UpperCamelCase , __UpperCamelCase : Any = outputs __UpperCamelCase : Union[str, Any] = torch.tensor(start_logits) __UpperCamelCase : List[Any] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __UpperCamelCase : Any = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __UpperCamelCase : Any = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __UpperCamelCase : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __UpperCamelCase : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __UpperCamelCase : int = nested_truncate(all_preds, len(eval_dataset)) __UpperCamelCase : int = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000)) logger.info("""Total Number of Inference = %d""", niter) __UpperCamelCase : Tuple = post_processing_function(eval_examples, eval_dataset, all_preds) __UpperCamelCase : Tuple = 
metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a_ ( _A , _A=0.999 , _A="cosine" , ) -> Optional[int]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_A ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) snake_case__ = [] for i in range(_A ): snake_case__ = i / num_diffusion_timesteps snake_case__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) ) return torch.tensor(_A , dtype=torch.floataa ) class __SCREAMING_SNAKE_CASE( a_ , a_ ): _UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase = 2 @register_to_config def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str: if trained_betas is not None: snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. snake_case__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' ) elif beta_schedule == "exp": snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) snake_case__ = 1.0 - self.betas snake_case__ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = use_karras_sigmas def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str: if schedule_timesteps is None: snake_case__ = self.timesteps snake_case__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0 else: snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep snake_case__ = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: snake_case__ = self.index_for_timestep(UpperCamelCase ) snake_case__ = self.sigmas[step_index] snake_case__ = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str: snake_case__ = num_inference_steps snake_case__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": snake_case__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": snake_case__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) snake_case__ = np.log(UpperCamelCase ) snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase ) if self.config.use_karras_sigmas: snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps ) snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] ) snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase ) snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) snake_case__ = torch.from_numpy(UpperCamelCase ) snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(UpperCamelCase ).startswith('mps' ): # mps does not support float64 snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa ) else: snake_case__ = timesteps.to(device=UpperCamelCase ) # empty dt and derivative snake_case__ = None snake_case__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter snake_case__ = defaultdict(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple: # get log sigma snake_case__ = np.log(UpperCamelCase ) # get distribution snake_case__ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) snake_case__ = low_idx + 1 snake_case__ = log_sigmas[low_idx] snake_case__ = log_sigmas[high_idx] # interpolate sigmas snake_case__ = (low - log_sigma) / (low - high) snake_case__ = np.clip(UpperCamelCase , 0 , 1 ) # transform interpolation to time range snake_case__ = (1 - w) * low_idx + w * high_idx snake_case__ = t.reshape(sigma.shape ) return t def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor: snake_case__ = in_sigmas[-1].item() snake_case__ = in_sigmas[0].item() snake_case__ = 7.0 # 7.0 is the value used in the paper snake_case__ = np.linspace(0 , 1 , UpperCamelCase ) snake_case__ = sigma_min ** (1 / rho) snake_case__ = sigma_max ** (1 / rho) snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: return self.dt is None def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]: snake_case__ = self.index_for_timestep(UpperCamelCase ) # advance index counter by 1 snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: snake_case__ = self.sigmas[step_index] snake_case__ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method snake_case__ = self.sigmas[step_index - 1] snake_case__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API snake_case__ = 0 snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": snake_case__ = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: snake_case__ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order snake_case__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep snake_case__ = sigma_next - sigma_hat # store for 2nd order step snake_case__ = derivative snake_case__ = dt snake_case__ = sample else: # 2. 2nd order / Heun's method snake_case__ = (sample - pred_original_sample) / sigma_next snake_case__ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample snake_case__ = self.dt snake_case__ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" snake_case__ = None snake_case__ = None snake_case__ = None snake_case__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ): # mps does not support float64 snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: snake_case__ = self.timesteps.to(original_samples.device ) snake_case__ = timesteps.to(original_samples.device ) snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps] snake_case__ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): snake_case__ = sigma.unsqueeze(-1 ) snake_case__ = original_samples + noise * sigma return noisy_samples def __len__( self: List[Any] ) -> Union[str, Any]: return self.config.num_train_timesteps
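# --- Hedged usage sketch (not part of the original file) ---
# The standard diffusers denoising loop for the Heun (2nd-order) scheduler
# defined above. The zero "model output" is a stand-in so the snippet runs
# without a UNet; a real pipeline would call its denoiser at that line.
import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=25)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.zeros_like(model_input)  # stand-in for the denoising model
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)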
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __UpperCamelCase : List[str] = logging.get_logger(__name__) @add_end_docstrings(a_ ) class __SCREAMING_SNAKE_CASE( a_ ): def __init__( self: Optional[Any] , **UpperCamelCase: str ) -> Optional[int]: super().__init__(**UpperCamelCase ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self: Optional[int] , UpperCamelCase: Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase: List[Any] ) -> Optional[int]: return super().__call__(UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Dict , **UpperCamelCase: int ) -> Dict: snake_case__ = {} if "candidate_labels" in kwargs: snake_case__ = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: snake_case__ = kwargs['hypothesis_template'] return preprocess_params, {}, {} def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: List[Any]="This is a photo of {}." ) -> Tuple: snake_case__ = load_image(UpperCamelCase ) snake_case__ = self.image_processor(images=[image] , return_tensors=self.framework ) snake_case__ = candidate_labels snake_case__ = [hypothesis_template.format(UpperCamelCase ) for x in candidate_labels] snake_case__ = self.tokenizer(UpperCamelCase , return_tensors=self.framework , padding=UpperCamelCase ) snake_case__ = [text_inputs] return inputs def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any ) -> Tuple: snake_case__ = model_inputs.pop('candidate_labels' ) snake_case__ = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , UpperCamelCase ): snake_case__ = text_inputs[0] else: # Batching case. snake_case__ = text_inputs[0][0] snake_case__ = self.model(**UpperCamelCase , **UpperCamelCase ) snake_case__ = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Optional[Any] ) -> str: snake_case__ = model_outputs.pop('candidate_labels' ) snake_case__ = model_outputs['logits'][0] if self.framework == "pt": snake_case__ = logits.softmax(dim=-1 ).squeeze(-1 ) snake_case__ = probs.tolist() if not isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = [scores] elif self.framework == "tf": snake_case__ = stable_softmax(UpperCamelCase , axis=-1 ) snake_case__ = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) snake_case__ = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase , UpperCamelCase ) , key=lambda UpperCamelCase : -x[0] ) ] return result
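# --- Hedged usage sketch (not part of the original file) ---
# The zero-shot image-classification pipeline above is normally reached through
# the `transformers.pipeline` factory; the CLIP checkpoint, image URL, and
# candidate labels below are illustrative choices, not something this file mandates.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
# a list of {"score": ..., "label": ...} dicts sorted by score, as built in postprocess
print(predictions)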
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
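# --- Hedged usage sketch (not part of the original file) ---
# One way the exports above are typically combined to export a model to ONNX;
# the checkpoint name and the "default" feature are illustrative assumptions.
from pathlib import Path

from transformers import AutoModel, AutoTokenizer
from transformers.onnx import FeaturesManager, export

model = AutoModel.from_pretrained("distilbert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
# look up the ONNX config class registered for this architecture/feature
model_kind, onnx_config_ctor = FeaturesManager.check_supported_model_or_raise(model, feature="default")
onnx_config = onnx_config_ctor(model.config)
onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))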
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Optional[int] = logging.get_logger(__name__) __UpperCamelCase : Any = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "funnel" _UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self: List[str] , UpperCamelCase: Optional[int]=3_05_22 , UpperCamelCase: Dict=[4, 4, 4] , UpperCamelCase: Any=None , UpperCamelCase: Tuple=2 , UpperCamelCase: List[Any]=7_68 , UpperCamelCase: Any=12 , UpperCamelCase: List[Any]=64 , UpperCamelCase: Any=30_72 , UpperCamelCase: Union[str, Any]="gelu_new" , UpperCamelCase: int=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: Optional[Any]=0.0 , UpperCamelCase: Any=0.1 , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=1e-9 , UpperCamelCase: Dict="mean" , UpperCamelCase: Optional[int]="relative_shift" , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Tuple=True , UpperCamelCase: Dict=True , **UpperCamelCase: List[str] , ) -> List[str]: snake_case__ = vocab_size snake_case__ = block_sizes snake_case__ = [1] * len(UpperCamelCase ) if block_repeats is None else block_repeats assert len(UpperCamelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
snake_case__ = num_decoder_layers snake_case__ = d_model snake_case__ = n_head snake_case__ = d_head snake_case__ = d_inner snake_case__ = hidden_act snake_case__ = hidden_dropout snake_case__ = attention_dropout snake_case__ = activation_dropout snake_case__ = initializer_range snake_case__ = initializer_std snake_case__ = layer_norm_eps assert pooling_type in [ "mean", "max", ], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' snake_case__ = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' snake_case__ = attention_type snake_case__ = separate_cls snake_case__ = truncate_seq snake_case__ = pool_q_only super().__init__(**UpperCamelCase ) @property def lowerCAmelCase_ ( self: Optional[Any] ) -> str: return sum(self.block_sizes ) @num_hidden_layers.setter def lowerCAmelCase_ ( self: int , UpperCamelCase: str ) -> Dict: raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' ) @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: return len(self.block_sizes ) @num_blocks.setter def lowerCAmelCase_ ( self: int , UpperCamelCase: Tuple ) -> Optional[int]: raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
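# --- Hedged usage sketch (not part of the original file) ---
# Minimal construction of the configuration above through its public
# transformers name; the block sizes chosen here are illustrative.
from transformers import FunnelConfig, FunnelModel

config = FunnelConfig(block_sizes=[2, 2, 2], num_decoder_layers=2)
model = FunnelModel(config)  # randomly initialized, no download needed
print(config.num_hidden_layers)  # 6 == sum(block_sizes), via the property defined above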
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: output is 1 only when both inputs are equal."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively check the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
import os

import pytest
from attr import dataclass


DEFAULT_REGION = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return (
                "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:"
                "1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
            )
        else:
            return (
                "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:"
                "2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
            )


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""") __UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def a_ ( ) -> int: """simple docstring""" with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def a_ ( ) -> List[str]: """simple docstring""" snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a_ ( ) -> Dict: """simple docstring""" snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() snake_case__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def a_ ( ) -> Optional[int]: """simple docstring""" assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() def a_ ( ) -> Optional[Any]: """simple docstring""" # laplace diagonals snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) snake_case__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def a_ ( ) -> Dict: """simple docstring""" assert med.median_filter(_A , 3 ).any() def a_ ( ) -> Dict: """simple docstring""" snake_case__ , snake_case__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = sp.make_sepia(_A , 20 ) assert sepia.all() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]: """simple docstring""" snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]: """simple docstring""" snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def a_ ( ) -> Any: """simple docstring""" snake_case__ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
snake_case__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None snake_case__ = 0 snake_case__ = 0 snake_case__ = image[x_coordinate][y_coordinate] snake_case__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image snake_case__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): snake_case__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if log2(sqrt(4 * positive_integer + 1) / 2 + 1 / 2) is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition candidate at which the proportion of perfect
    partitions drops below ``max_proportion``."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
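# --- Usage sketch (not part of the original file) ---
# A minimal example of the resize -> center-crop -> rescale -> normalize
# pipeline implemented above. The obfuscated class name makes this file
# non-importable as-is, so the sketch assumes the public class it appears to
# correspond to, `transformers.MobileNetV1ImageProcessor`.
if __name__ == "__main__":
    import numpy as np
    from transformers import MobileNetV1ImageProcessor

    processor = MobileNetV1ImageProcessor(
        size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224}
    )
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)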
307
1
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = WavaVecaPhonemeCTCTokenizer _UpperCAmelCase = False def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: super().setUp() snake_case__ = ( '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ' 'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ' 'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ' 'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ' 'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ' 'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ' 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' 'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ' 'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ' 'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ' 'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ' 'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ' 'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4' ).split(' ' ) snake_case__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) snake_case__ = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'} snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(UpperCamelCase ) + '\n' ) def lowerCAmelCase_ ( self: str , UpperCamelCase: Tuple , UpperCamelCase: Optional[int]=False , UpperCamelCase: List[str]=20 , UpperCamelCase: Any=5 ) -> Tuple[str, list]: snake_case__ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase )) for i in range(len(UpperCamelCase ) )] snake_case__ = list(filter(lambda UpperCamelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=UpperCamelCase ) , UpperCamelCase ) ) if max_length is not None and len(UpperCamelCase ) > max_length: snake_case__ = toks[:max_length] if min_length is not None and len(UpperCamelCase ) < min_length and len(UpperCamelCase ) > 0: while len(UpperCamelCase ) < min_length: snake_case__ = toks + toks # toks_str = [t[1] for t in toks] snake_case__ = [t[0] for t in toks] # Ensure consistency snake_case__ = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) if " " not in output_txt and len(UpperCamelCase ) > 1: snake_case__ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase ) ) if with_prefix_space: snake_case__ = ' ' + output_txt snake_case__ = tokenizer.encode(UpperCamelCase , 
add_special_tokens=UpperCamelCase ) return output_txt, output_ids def lowerCAmelCase_ ( self: Optional[Any] , **UpperCamelCase: Any ) -> Tuple: kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[int] ) -> str: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) # check adding a single token tokenizer.add_tokens('xxx' ) snake_case__ = tokenizer('m xxx ɪ' , do_phonemize=UpperCamelCase ).input_ids self.assertEqual(UpperCamelCase , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] ) snake_case__ = tokenizer('m aaa ɪ ccc' , do_phonemize=UpperCamelCase ).input_ids self.assertEqual(UpperCamelCase , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa snake_case__ = tokenizer('maɪ c' , do_phonemize=UpperCamelCase ).input_ids self.assertEqual(UpperCamelCase , [3, 2_00] ) # mai should be <unk> (=3) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) self.assertEqual(UpperCamelCase , 'h ə l oʊ h aʊ ɑːɹ j uː' ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) self.assertEqual(tokenizer(UpperCamelCase ).input_ids , tokenizer(UpperCamelCase , do_phonemize=UpperCamelCase ).input_ids ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) snake_case__ = tokenizer.decode(tokenizer(UpperCamelCase ).input_ids ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) snake_case__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] snake_case__ = tokenizer.decode(sample_ids[0] ) snake_case__ = tokenizer.batch_decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , batch_tokens[0] ) self.assertEqual(UpperCamelCase , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] ) def lowerCAmelCase_ ( self: str ) -> int: snake_case__ = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) self.assertEqual(UpperCamelCase , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' ) def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]: snake_case__ = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) self.assertEqual(tokenizer(UpperCamelCase ).input_ids , tokenizer(UpperCamelCase , do_phonemize=UpperCamelCase ).input_ids ) def lowerCAmelCase_ ( self: str ) -> int: snake_case__ = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) 
tokenizer.add_tokens('|' ) # fmt: off snake_case__ = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter snake_case__ = tokenizer.decode(sample_ids[0] ) snake_case__ = tokenizer.batch_decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , batch_tokens[0] ) self.assertEqual(UpperCamelCase , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] ) # decode with no word_del_token filter snake_case__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase ) snake_case__ = tokenizer.batch_decode(UpperCamelCase , filter_word_delimiter_token=UpperCamelCase ) self.assertEqual(UpperCamelCase , batch_tokens[0] ) self.assertEqual(UpperCamelCase , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] ) def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) snake_case__ = tokenizer.decode(tokenizer(UpperCamelCase ).input_ids , filter_word_delimiter_token=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: snake_case__ = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer.phonemize(UpperCamelCase , phonemizer_lang='en-us' ) snake_case__ = tokenizer.decode(tokenizer(UpperCamelCase ).input_ids , filter_word_delimiter_token=UpperCamelCase ) self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: snake_case__ = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=UpperCamelCase ) snake_case__ = 'Hello how are you' snake_case__ = tokenizer(UpperCamelCase , phonemizer_lang='en-us' ).input_ids snake_case__ = tokenizer(UpperCamelCase , phonemizer_lang='fr-fr' ).input_ids self.assertNotEqual(UpperCamelCase , UpperCamelCase ) snake_case__ = tokenizer.decode(UpperCamelCase ) snake_case__ = tokenizer.decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , 'h ə l oʊ h aʊ ɑːɹ j uː' ) self.assertEqual(UpperCamelCase , 'ɛ l o h aʊ a ʁ j u' ) def lowerCAmelCase_ ( self: str ) -> Optional[Any]: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) snake_case__ = 'Hello how Are you' snake_case__ = 'hello how are you' snake_case__ = tokenizer(UpperCamelCase ).input_ids snake_case__ = tokenizer(UpperCamelCase ).input_ids self.assertEqual(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: snake_case__ = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) tokenizer.add_tokens(['!', '?'] ) tokenizer.add_special_tokens({'cls_token': '$$$'} ) # fmt: off snake_case__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on snake_case__ = tokenizer.batch_decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , ['k s ɾ ɾ l ɭʲ!?!? 
$$$', 'j ð s j ð s oːɹ $$$'] ) @staticmethod def lowerCAmelCase_ ( UpperCamelCase: List[Any] , UpperCamelCase: List[str] ) -> Dict: snake_case__ = [d[key] for d in offsets] return retrieved_list def lowerCAmelCase_ ( self: int ) -> Optional[int]: snake_case__ = self.get_tokenizer(word_delimiter_token='|' ) tokenizer.add_tokens('|' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" snake_case__ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on snake_case__ = tokenizer.decode(UpperCamelCase , output_char_offsets=UpperCamelCase , filter_word_delimiter_token=UpperCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('text' in outputs ) self.assertTrue('char_offsets' in outputs ) self.assertTrue(isinstance(UpperCamelCase , UpperCamelCase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def lowerCAmelCase_ ( self: str ) -> int: snake_case__ = self.get_tokenizer(word_delimiter_token='|' ) def check_list_tuples_equal(UpperCamelCase: Any , UpperCamelCase: Optional[int] ): self.assertTrue(isinstance(UpperCamelCase , UpperCamelCase ) ) self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase ) ) # transform list to ModelOutput snake_case__ = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] ) def recursive_check(UpperCamelCase: Any , UpperCamelCase: List[Any] ): if isinstance(UpperCamelCase , UpperCamelCase ): [recursive_check(UpperCamelCase , UpperCamelCase ) for la, la in zip(UpperCamelCase , UpperCamelCase )] self.assertEqual(UpperCamelCase , UpperCamelCase ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] ) # fmt: off snake_case__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char snake_case__ = tokenizer.batch_decode(UpperCamelCase , output_char_offsets=UpperCamelCase ) snake_case__ = [tokenizer.decode(UpperCamelCase , output_char_offsets=UpperCamelCase ) for ids in sample_ids] check_list_tuples_equal(UpperCamelCase , UpperCamelCase ) @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' ) def lowerCAmelCase_ ( self: Tuple ) -> Tuple: pass @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' ) def lowerCAmelCase_ ( self: List[Any] ) -> Tuple: pass @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' ) def lowerCAmelCase_ ( self: Dict ) -> List[str]: pass @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: pass def lowerCAmelCase_ ( self: List[Any] ) -> str: snake_case__ = self.get_tokenizers(do_lower_case=UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case__ = tokenizer.vocab_size snake_case__ = len(UpperCamelCase ) self.assertNotEqual(UpperCamelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case__ = ['aaaaa bbbbbb', 'cccccccccdddddddd'] snake_case__ = tokenizer.add_tokens(UpperCamelCase ) snake_case__ = tokenizer.vocab_size snake_case__ = len(UpperCamelCase ) self.assertNotEqual(UpperCamelCase , 0 ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , len(UpperCamelCase ) ) self.assertEqual(UpperCamelCase , all_size + len(UpperCamelCase ) ) snake_case__ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=UpperCamelCase ) self.assertGreaterEqual(len(UpperCamelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case__ = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} snake_case__ = tokenizer.add_special_tokens(UpperCamelCase ) snake_case__ = tokenizer.vocab_size snake_case__ = len(UpperCamelCase ) self.assertNotEqual(UpperCamelCase , 0 ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , len(UpperCamelCase ) ) self.assertEqual(UpperCamelCase , all_size_a + len(UpperCamelCase ) ) snake_case__ = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=UpperCamelCase ) self.assertGreaterEqual(len(UpperCamelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' ) def lowerCAmelCase_ ( self: Any ) -> Dict: pass @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' 
) def lowerCAmelCase_ ( self: Dict ) -> int: pass def lowerCAmelCase_ ( self: List[str] ) -> str: # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. snake_case__ = self.get_tokenizers(fast=UpperCamelCase , do_lower_case=UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case__ = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't'] snake_case__ = tokenizer.convert_tokens_to_string(UpperCamelCase ) self.assertIsInstance(output['text'] , UpperCamelCase )
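# --- Usage sketch (not part of the original tests) ---
# A standalone version of the phonemize -> encode -> decode round trip the
# tests above exercise, assuming the public Wav2Vec2PhonemeCTCTokenizer API
# and the same checkpoint the tests download (needs the `phonemizer` package
# and network access).
if __name__ == "__main__":
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
    print(phonemes)  # h ə l oʊ h aʊ ɑːɹ j uː
    input_ids = tokenizer(phonemes, do_phonemize=False).input_ids
    print(tokenizer.decode(input_ids))  # round-trips back to the phoneme string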
307
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place with the Fisher-Yates algorithm and return it."""
    # Walk backwards, swapping each position with a uniformly chosen index in
    # [0, i]; this produces every permutation with equal probability.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
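# --- Empirical sanity check (illustrative) ---
# With the uniform Fisher-Yates loop above, every permutation of a small list
# should appear with roughly equal frequency.
def check_uniformity(trials: int = 60_000) -> None:
    from collections import Counter

    counts = Counter(tuple(fisher_yates_shuffle([1, 2, 3])) for _ in range(trials))
    for perm, count in sorted(counts.items()):
        print(perm, round(count / trials, 3))  # each ratio should be near 1/6, i.e. about 0.167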
307
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def __init__( self: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: List[Any]=3 , UpperCamelCase: Dict=18 , UpperCamelCase: Optional[Any]=30 , UpperCamelCase: Union[str, Any]=4_00 , UpperCamelCase: Any=True , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[int]=True , UpperCamelCase: int=None , UpperCamelCase: str=True , UpperCamelCase: Optional[Any]=[0.5, 0.5, 0.5] , UpperCamelCase: Dict=[0.5, 0.5, 0.5] , ) -> Union[str, Any]: snake_case__ = size if size is not None else {'shortest_edge': 18} snake_case__ = crop_size if crop_size is not None else {'height': 18, 'width': 18} snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_normalize snake_case__ = image_mean snake_case__ = image_std def lowerCAmelCase_ ( self: Any ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = LevitImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]: snake_case__ = LevitImageProcessingTester(self ) @property def lowerCAmelCase_ ( self: int ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self: str ) -> int: snake_case__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCamelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(UpperCamelCase , 'size' ) ) def lowerCAmelCase_ ( self: int ) -> Optional[Any]: snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def lowerCAmelCase_ ( self: Any ) -> Tuple: pass def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input snake_case__ = 
image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def lowerCAmelCase_ ( self: Dict ) -> Any: # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
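# --- Usage sketch (not part of the original tests) ---
# What these tests exercise, condensed: a shortest-edge resize followed by a
# center crop. Assumes the public `transformers.LevitImageProcessor`; the
# sizes mirror the tester defaults above.
if __name__ == "__main__":
    import numpy as np
    from transformers import LevitImageProcessor

    processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (32, 40, 3), dtype=np.uint8)  # dummy HWC image
    pixel_values = processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])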
307
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priorities (0 is served first); each priority level holds at most 100 items."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # serve the lowest-numbered non-empty priority first, FIFO within a priority
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The smallest element is dequeued first; the queue holds at most 100 items."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    try:
        print(fpq.dequeue())  # only nine items were enqueued, so the tenth dequeue raises
    except UnderFlowError as err:
        print(err)


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    try:
        print(epq.dequeue())  # only nine items were enqueued, so the tenth dequeue raises
    except UnderFlowError as err:
        print(err)


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
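# --- Error-path sketch (illustrative) ---
# The demos above mostly cover the happy path; these are the guard rails the
# classes enforce.
def error_paths_demo() -> None:
    fpq = FixedPriorityQueue()
    for value in range(100):
        fpq.enqueue(0, value)
    try:
        fpq.enqueue(0, 100)  # 101st item on priority 0
    except OverflowError as err:
        print(err)  # Maximum queue size is 100
    try:
        fpq.enqueue(5, 1)  # priority out of range
    except ValueError as err:
        print(err)  # Valid priorities are 0, 1, and 2
    try:
        ElementPriorityQueue().dequeue()
    except UnderFlowError as err:
        print(err)  # The queue is empty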
307
1
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n, in lexicographic order."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: `level` counts the picks still to make; at 0 the combination is recorded."""
    if level == 0:
        total_list.append(current_list[:])
        return

    # leave enough room for the remaining `level - 1` picks after choosing i
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
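# --- Cross-check (illustrative) ---
# The recursive generator above emits k-combinations of 1..n in lexicographic
# order, so it should agree exactly with the standard library.
def itertools_cross_check(n: int = 4, k: int = 2) -> None:
    from itertools import combinations

    expected = [list(c) for c in combinations(range(1, n + 1), k)]
    assert generate_all_combinations(n, k) == expected
    print(f"matches itertools.combinations for n={n}, k={k}")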
307
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["image_processor", "tokenizer"] _UpperCAmelCase = "LayoutLMv2ImageProcessor" _UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase , ) snake_case__ = kwargs.pop('feature_extractor' ) snake_case__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case__ = features['words'] snake_case__ = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) # add pixel values snake_case__ = features.pop('pixel_values' ) if return_overflowing_tokens is True: snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] ) snake_case__ = images return encoded_inputs def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' ) return images_with_overflow def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]: return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCAmelCase_ ( self: str ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCAmelCase_ ( self: Any ) -> List[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , ) return self.image_processor
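# --- Usage sketch (not part of the original file) ---
# A minimal no-OCR example, assuming the public transformers classes this
# processor wires together. With apply_ocr=False the caller supplies words and
# 0-1000-normalized boxes directly, the branch validated in __call__ above.
# Requires network access for the tokenizer checkpoint.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor, LayoutXLMTokenizerFast

    processor = LayoutXLMProcessor(
        image_processor=LayoutLMv2ImageProcessor(apply_ocr=False),
        tokenizer=LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"),
    )
    image = Image.new("RGB", (224, 224), "white")  # placeholder page image
    words = ["hello", "world"]
    boxes = [[10, 10, 60, 20], [70, 10, 130, 20]]  # one 0-1000 box per word
    encoding = processor(image, words, boxes=boxes, return_tensors="pt")
    print(sorted(encoding.keys()))  # e.g. attention_mask, bbox, image, input_ids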
307
1
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over `arr` in steps of `stride`, keeping the maximum of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over `arr` in steps of `stride`, keeping the average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
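# --- Worked example (illustrative) ---
# Pooling a 4x4 matrix with a 2x2 window and stride 2 reduces each
# non-overlapping block to a single value.
def pooling_demo() -> None:
    mat = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    print(maxpooling(mat, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(mat, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]  (averages truncated to int)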
307
def solution(limit: int = 1000) -> int:
    """
    Return the sum of all the multiples of 3 or 5 below `limit`
    (Project Euler problem 1).
    """
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
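# --- O(1) cross-check (illustrative) ---
# By inclusion-exclusion, the sum of multiples of 3 or 5 below `limit` equals
# S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) / 2 and m = (limit - 1) // k.
def solution_closed_form(limit: int = 1000) -> int:
    def s(k: int) -> int:
        m = (limit - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)  # agrees with solution(limit) for every limit >= 1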
307
1
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE( a_ , a_ , unittest.TestCase ): _UpperCAmelCase = StableDiffusionDiffEditPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} _UpperCAmelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _UpperCAmelCase = frozenset([] ) def lowerCAmelCase_ ( self: List[Any] ) -> str: torch.manual_seed(0 ) snake_case__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase , ) snake_case__ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , ) snake_case__ = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase , set_alpha_to_zero=UpperCamelCase , ) torch.manual_seed(0 ) snake_case__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) snake_case__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , ) snake_case__ = CLIPTextModel(UpperCamelCase ) snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case__ = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict=0 ) -> List[str]: snake_case__ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) snake_case__ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('mps' ): snake_case__ = torch.manual_seed(UpperCamelCase ) else: snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) snake_case__ = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 
'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict=0 ) -> Dict: snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) snake_case__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert('RGB' ) if str(UpperCamelCase ).startswith('mps' ): snake_case__ = torch.manual_seed(UpperCamelCase ) else: snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) snake_case__ = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: Tuple=0 ) -> List[Any]: snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) snake_case__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ = Image.fromarray(np.uinta(UpperCamelCase ) ).convert('RGB' ) if str(UpperCamelCase ).startswith('mps' ): snake_case__ = torch.manual_seed(UpperCamelCase ) else: snake_case__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) snake_case__ = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: if not hasattr(self.pipeline_class , '_optional_components' ): return snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) snake_case__ = self.get_dummy_inputs(UpperCamelCase ) snake_case__ = pipe(**UpperCamelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase ) snake_case__ = self.pipeline_class.from_pretrained(UpperCamelCase ) pipe_loaded.to(UpperCamelCase ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase ) for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCamelCase , UpperCamelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) snake_case__ = self.get_dummy_inputs(UpperCamelCase ) snake_case__ = pipe_loaded(**UpperCamelCase )[0] snake_case__ = np.abs(output - output_loaded ).max() self.assertLess(UpperCamelCase , 1e-4 ) def lowerCAmelCase_ ( self: int ) -> List[str]: snake_case__ = 'cpu' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = self.get_dummy_mask_inputs(UpperCamelCase ) snake_case__ = pipe.generate_mask(**UpperCamelCase ) snake_case__ = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) snake_case__ = np.array([0] * 9 ) snake_case__ = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase , 1e-3 ) 
self.assertEqual(mask[0, -3, -4] , 0 ) def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ = 'cpu' snake_case__ = self.get_dummy_components() snake_case__ = self.pipeline_class(**UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = self.get_dummy_inversion_inputs(UpperCamelCase ) snake_case__ = pipe.invert(**UpperCamelCase ).images snake_case__ = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) snake_case__ = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase , 1e-3 ) def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]: snake_case__ = 'cpu' snake_case__ = self.get_dummy_components() snake_case__ = {'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'} snake_case__ = DPMSolverMultistepScheduler(**UpperCamelCase ) snake_case__ = DPMSolverMultistepInverseScheduler(**UpperCamelCase ) snake_case__ = self.pipeline_class(**UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = self.get_dummy_inversion_inputs(UpperCamelCase ) snake_case__ = pipe.invert(**UpperCamelCase ).images snake_case__ = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) snake_case__ = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase , 1e-3 ) @require_torch_gpu @slow class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: Tuple ) -> Any: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def lowerCAmelCase_ ( cls: List[Any] ) -> int: snake_case__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) snake_case__ = raw_image.convert('RGB' ).resize((7_68, 7_68) ) snake_case__ = raw_image def lowerCAmelCase_ ( self: str ) -> int: snake_case__ = torch.manual_seed(0 ) snake_case__ = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ = DDIMScheduler.from_config(pipe.scheduler.config ) snake_case__ = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = 'a bowl of fruit' snake_case__ = 'a bowl of pears' snake_case__ = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase , target_prompt=UpperCamelCase , generator=UpperCamelCase , ) snake_case__ = pipe.invert( prompt=UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase ).latents snake_case__ = pipe( prompt=UpperCamelCase , mask_image=UpperCamelCase , image_latents=UpperCamelCase , generator=UpperCamelCase , negative_prompt=UpperCamelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0] snake_case__ = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def lowerCAmelCase_ ( self: Any ) -> Union[str, Any]: snake_case__ = 
torch.manual_seed(0 ) snake_case__ = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=UpperCamelCase , torch_dtype=torch.floataa ) snake_case__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) snake_case__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase ) snake_case__ = 'a bowl of fruit' snake_case__ = 'a bowl of pears' snake_case__ = pipe.generate_mask( image=self.raw_image , source_prompt=UpperCamelCase , target_prompt=UpperCamelCase , generator=UpperCamelCase , ) snake_case__ = pipe.invert( prompt=UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase , num_inference_steps=25 , ).latents snake_case__ = pipe( prompt=UpperCamelCase , mask_image=UpperCamelCase , image_latents=UpperCamelCase , generator=UpperCamelCase , negative_prompt=UpperCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0] snake_case__ = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
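# --- Usage sketch (not part of the original tests) ---
# The three-stage DiffEdit flow the slow tests above run end to end:
# generate_mask -> invert -> inpaint. Checkpoint and image URL are the ones
# the tests use; running this needs a GPU and network access.
if __name__ == "__main__":
    import torch
    from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionDiffEditPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
    )
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
    pipe.enable_model_cpu_offload()

    image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
    ).convert("RGB").resize((768, 768))

    mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
    latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
    edited = pipe(
        prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
    ).images[0]
    edited.save("pears.png")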
307
import os


def solution() -> str:
    """
    Return the first ten digits of the sum of the numbers stored one per line
    in `num.txt` (Project Euler problem 13).
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
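# --- Self-contained miniature (illustrative) ---
# The same "first ten digits of a big sum" idea on made-up stand-in numbers,
# since `num.txt` ships with the original project and is not reproduced here.
if __name__ == "__main__":
    sample = ["530816984396", "283946027418", "947361820075"]  # arbitrary stand-ins
    print(str(sum(int(line) for line in sample))[:10])  # 1762124831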
307
1
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = """Hello, World!""" __UpperCamelCase : Union[str, Any] = """en_XX""" def a_ ( _A , _A , _A ) -> Optional[int]: """simple docstring""" snake_case__ = Path('data_bin' ) snake_case__ = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_A ).parent ) , checkpoint_file=Path(_A ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(_A ) , bpe='sentencepiece' , sentencepiece_model=str(Path(_A ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , ) xmod.eval() # disable dropout print(_A ) snake_case__ = xmod.model.encoder.sentence_encoder snake_case__ = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: snake_case__ = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our X-MOD config:' , _A ) snake_case__ = XmodForSequenceClassification(_A ) if classification_head else XmodForMaskedLM(_A ) model.eval() # Now let's copy all the weights. # Embeddings snake_case__ = xmod_sent_encoder.embed_tokens.weight snake_case__ = xmod_sent_encoder.embed_positions.weight snake_case__ = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. snake_case__ = xmod_sent_encoder.layernorm_embedding.weight snake_case__ = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer snake_case__ = model.roberta.encoder.layer[i] snake_case__ = xmod_sent_encoder.layers[i] # self attention snake_case__ = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError('Dimensions of self-attention weights do not match.' 
) snake_case__ = xmod_layer.self_attn.q_proj.weight snake_case__ = xmod_layer.self_attn.q_proj.bias snake_case__ = xmod_layer.self_attn.k_proj.weight snake_case__ = xmod_layer.self_attn.k_proj.bias snake_case__ = xmod_layer.self_attn.v_proj.weight snake_case__ = xmod_layer.self_attn.v_proj.bias # self-attention output snake_case__ = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('Dimensions of self-attention output weights do not match.' ) snake_case__ = xmod_layer.self_attn.out_proj.weight snake_case__ = xmod_layer.self_attn.out_proj.bias snake_case__ = xmod_layer.self_attn_layer_norm.weight snake_case__ = xmod_layer.self_attn_layer_norm.bias # intermediate snake_case__ = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('Dimensions of intermediate weights do not match.' ) snake_case__ = xmod_layer.fca.weight snake_case__ = xmod_layer.fca.bias # output snake_case__ = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('Dimensions of feed-forward weights do not match.' ) snake_case__ = xmod_layer.fca.weight snake_case__ = xmod_layer.fca.bias snake_case__ = xmod_layer.final_layer_norm.weight snake_case__ = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: snake_case__ = xmod_layer.adapter_layer_norm.weight snake_case__ = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError('Lists of language adapters do not match.' ) for lang_code, adapter in xmod_layer.adapter_modules.items(): snake_case__ = bert_output.adapter_modules[lang_code] snake_case__ = xmod_layer.adapter_modules[lang_code] snake_case__ = from_adapter.fca.weight snake_case__ = from_adapter.fca.bias snake_case__ = from_adapter.fca.weight snake_case__ = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: snake_case__ = xmod_sent_encoder.layer_norm.weight snake_case__ = xmod_sent_encoder.layer_norm.bias if classification_head: snake_case__ = xmod.model.classification_heads['mnli'].dense.weight snake_case__ = xmod.model.classification_heads['mnli'].dense.bias snake_case__ = xmod.model.classification_heads['mnli'].out_proj.weight snake_case__ = xmod.model.classification_heads['mnli'].out_proj.bias else: # LM Head snake_case__ = xmod.model.encoder.lm_head.dense.weight snake_case__ = xmod.model.encoder.lm_head.dense.bias snake_case__ = xmod.model.encoder.lm_head.layer_norm.weight snake_case__ = xmod.model.encoder.lm_head.layer_norm.bias snake_case__ = xmod.model.encoder.lm_head.weight snake_case__ = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. snake_case__ = xmod.encode(_A ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_A ) snake_case__ = model(_A )[0] if classification_head: snake_case__ = xmod.model.classification_heads['mnli'](xmod.extract_features(_A ) ) else: snake_case__ = xmod.model(_A , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) snake_case__ = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 snake_case__ = torch.allclose(_A , _A , atol=1e-3 ) print('Do both models output the same tensors?' 
, '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) Path(_A ).mkdir(parents=_A , exist_ok=_A ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_A ) if __name__ == "__main__": __UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) __UpperCamelCase : List[Any] = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
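# --- Example invocation (illustrative; the script name and every path below are placeholders) ---
# The fairseq loader above resolves `sentencepiece.bpe.model` next to the
# checkpoint file and expects `data_bin/dict.txt` relative to the working
# directory:
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path /path/to/hf-xmod-base \
#       --classification_head   # only for checkpoints that carry an MNLI head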
307
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes;
        # defined under the guard so the module also imports on POSIX,
        # where ctypes is not imported above
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    # name restored as `hidden_cursor`; the original name is not recoverable
    # from this dump
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
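# --- Usage sketch ---
# Hide the cursor for the duration of a rendering loop; the try/finally in the
# context manager above restores it even if the body raises.
if __name__ == "__main__":
    import time

    with hidden_cursor():
        for i in range(3):
            print(f"working... {i}")
            time.sleep(0.5)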
def a_ ( _A , _A = False ) -> str: """simple docstring""" if not isinstance(_A , _A ): snake_case__ = f'''Expected string as input, found {type(_A )}''' raise ValueError(_A ) if not isinstance(_A , _A ): snake_case__ = f'''Expected boolean as use_pascal parameter, found {type(_A )}''' raise ValueError(_A ) snake_case__ = input_str.split('_' ) snake_case__ = 0 if use_pascal else 1 snake_case__ = words[start_index:] snake_case__ = [word[0].upper() + word[1:] for word in words_to_capitalize] snake_case__ = '' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
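# Worked examples for the converter above (shown as comments because the
# parameters are obfuscated to `_A` here; positionally, the second argument
# is the use_pascal flag):
#
#   a_("some_random_string")        -> "someRandomString"   # camelCase
#   a_("some_random_string", True)  -> "SomeRandomString"   # PascalCase
#
# With the flag off, the first word is kept verbatim and every later word is
# capitalized; with the flag on, every word including the first is capitalized.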
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : Any = { """7B""": 11008, """13B""": 13824, """30B""": 17920, """65B""": 22016, """70B""": 28672, } __UpperCamelCase : Optional[Any] = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def a_ ( _A , _A=1 , _A=256 ) -> str: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def a_ ( _A ) -> int: """simple docstring""" with open(_A , 'r' ) as f: return json.load(_A ) def a_ ( _A , _A ) -> int: """simple docstring""" with open(_A , 'w' ) as f: json.dump(_A , _A ) def a_ ( _A , _A , _A , _A=True ) -> List[str]: """simple docstring""" os.makedirs(_A , exist_ok=_A ) snake_case__ = os.path.join(_A , 'tmp' ) os.makedirs(_A , exist_ok=_A ) snake_case__ = read_json(os.path.join(_A , 'params.json' ) ) snake_case__ = NUM_SHARDS[model_size] snake_case__ = params['n_layers'] snake_case__ = params['n_heads'] snake_case__ = n_heads // num_shards snake_case__ = params['dim'] snake_case__ = dim // n_heads snake_case__ = 10000.0 snake_case__ = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: snake_case__ = params['n_kv_heads'] # for GQA / MQA snake_case__ = n_heads_per_shard // num_key_value_heads snake_case__ = dim // num_key_value_heads else: # compatibility with other checkpoints snake_case__ = n_heads snake_case__ = n_heads_per_shard snake_case__ = dim # permute for sliced rotary def permute(_A , _A=n_heads , _A=dim , _A=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
snake_case__ = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' ) else: # Sharded snake_case__ = [ torch.load(os.path.join(_A , f'''consolidated.{i:02d}.pth''' ) , map_location='cpu' ) for i in range(_A ) ] snake_case__ = 0 snake_case__ = {'weight_map': {}} for layer_i in range(_A ): snake_case__ = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. snake_case__ = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) snake_case__ = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) snake_case__ = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_A )] , dim=1 ) snake_case__ = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_A )] , dim=0 ) snake_case__ = inv_freq for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) snake_case__ = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded snake_case__ = { 'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight'], } else: snake_case__ = { 'model.norm.weight': loaded[0]['norm.weight'], 'model.embed_tokens.weight': torch.cat( 
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): snake_case__ = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs snake_case__ = {'total_size': param_count * 2} write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) ) snake_case__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 snake_case__ = params['multiple_of'] if 'multiple_of' in params else 256 snake_case__ = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('Loading the checkpoint in a Llama model.' ) snake_case__ = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print('Saving in the Transformers format.' ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def a_ ( _A , _A ) -> Tuple: """simple docstring""" # Initialize the tokenizer based on the `spm` model snake_case__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) snake_case__ = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def a_ ( ) -> str: """simple docstring""" snake_case__ = argparse.ArgumentParser() parser.add_argument( '--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , ) parser.add_argument( '--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , ) parser.add_argument( '--output_dir' , help='Location to write HF model and tokenizer' , ) parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' ) snake_case__ = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) snake_case__ = os.path.join(args.input_dir , 'tokenizer.model' ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
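# Worked arithmetic for compute_intermediate_size (defined at the top of this
# file under an obfuscated name, with ffn_dim_multiplier=1 and multiple_of=256).
# For the 7B model, dim = 4096:
#
#   int(8 * 4096 / 3)               = 10922
#   rounded up to a multiple of 256 = 43 * 256 = 11008
#
# which matches the "7B": 11008 entry in the intermediate-size table at the
# top of the file.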
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __UpperCamelCase : List[Any] = logging.getLogger(__name__) torch.set_grad_enabled(False) __UpperCamelCase : int = """cuda""" if torch.cuda.is_available() else """cpu""" def a_ ( _A , _A=100 , _A=" " ) -> List[str]: """simple docstring""" snake_case__ = text.split(_A ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_A ) , _A )] def a_ ( _A ) -> dict: """simple docstring""" snake_case__ , snake_case__ = [], [] for title, text in zip(documents['title'] , documents['text'] ): if text is not None: for passage in split_text(_A ): titles.append(title if title is not None else '' ) texts.append(_A ) return {"title": titles, "text": texts} def a_ ( _A , _A , _A ) -> dict: """simple docstring""" snake_case__ = ctx_tokenizer( documents['title'] , documents['text'] , truncation=_A , padding='longest' , return_tensors='pt' )['input_ids'] snake_case__ = ctx_encoder(input_ids.to(device=_A ) , return_dict=_A ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def a_ ( _A , _A , _A , ) -> Optional[int]: """simple docstring""" ###################################### logger.info('Step 1 - Create the dataset' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way snake_case__ = load_dataset( 'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words snake_case__ = dataset.map(_A , batched=_A , num_proc=processing_args.num_proc ) # And compute the embeddings snake_case__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_A ) snake_case__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) snake_case__ = Features( {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space snake_case__ = dataset.map( partial(_A , ctx_encoder=_A , ctx_tokenizer=_A ) , batched=_A , batch_size=processing_args.batch_size , features=_A , ) # And finally save your dataset snake_case__ = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' ) dataset.save_to_disk(_A ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('Step 2 - Index the dataset' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search snake_case__ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , 
faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('embeddings' , custom_index=_A ) # And save the index snake_case__ = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' ) dataset.get_index('embeddings' ).save(_A ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __SCREAMING_SNAKE_CASE: _UpperCAmelCase = field( default=str(Path(a_ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) _UpperCAmelCase = field( default=a_ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) _UpperCAmelCase = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) _UpperCAmelCase = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) _UpperCAmelCase = field( default=str(Path(a_ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class __SCREAMING_SNAKE_CASE: _UpperCAmelCase = field( default=a_ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) _UpperCAmelCase = field( default=1_6 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class __SCREAMING_SNAKE_CASE: _UpperCAmelCase = field( default=7_6_8 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) _UpperCAmelCase = field( default=1_2_8 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __UpperCamelCase : Optional[int] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __UpperCamelCase : List[str] = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
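# Sketch of how the saved passages and index are consumed later (paths are
# hypothetical; the retrieval calls are the standard `datasets` search API):
#
#   from datasets import load_from_disk
#
#   ds = load_from_disk("<output_dir>/my_knowledge_dataset")
#   ds.load_faiss_index("embeddings", "<output_dir>/my_knowledge_dataset_hnsw_index.faiss")
#   scores, passages = ds.get_nearest_examples("embeddings", question_embedding, k=5)
#
# where `question_embedding` is a DPR question-encoder output with the same
# dimensionality `d` as the HNSW index built above.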
import os import string import sys __UpperCamelCase : List[Any] = 1 << 8 __UpperCamelCase : Union[str, Any] = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } __UpperCamelCase : Optional[Any] = KEYMAP["""up"""] __UpperCamelCase : Tuple = KEYMAP["""left"""] if sys.platform == "win32": __UpperCamelCase : List[Any] = [] __UpperCamelCase : int = { b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): __UpperCamelCase : List[str] = ord(str(i)) def a_ ( ) -> Optional[int]: """simple docstring""" if os.name == "nt": import msvcrt snake_case__ = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_A ) == 0: # Read the keystroke snake_case__ = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): snake_case__ = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: snake_case__ = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(_A ) if ord(_A ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) snake_case__ = chr(KEYMAP['esc'] ) except KeyError: snake_case__ = cha[1] else: snake_case__ = ch.decode(_A ) else: snake_case__ = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty snake_case__ = sys.stdin.fileno() snake_case__ = termios.tcgetattr(_A ) try: tty.setraw(_A ) snake_case__ = sys.stdin.read(1 ) finally: termios.tcsetattr(_A , termios.TCSADRAIN , _A ) return ch def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = get_raw_chars() if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_A ) == KEYMAP["esc"]: snake_case__ = get_raw_chars() if ord(_A ) == KEYMAP["mod_int"]: snake_case__ = get_raw_chars() if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_A ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
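# How the arrow-key encoding above works: ARROW_KEY_FLAG is 1 << 8 == 256,
# which pushes arrow codes outside the one-byte character range so they can
# never collide with a printable key. For example:
#
#   KEYMAP["up"] == 65 + 256 == 321   # 65 == ord("A"), the VT100 final byte for "up"
#
# The two obfuscated assignments after the dict correspond to the
# arrow_begin/arrow_end bounds (KEYMAP["up"] and KEYMAP["left"]) that the
# reader below uses to recognize an escape sequence as an arrow press and
# re-tag it with ARROW_KEY_FLAG.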
import re def a_ ( _A ) -> bool: """simple docstring""" snake_case__ = re.compile( R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' ) return bool(re.search(_A , _A ) ) if __name__ == "__main__": __UpperCamelCase : Any = """0094702343221""" print(is_sri_lankan_phone_number(phone))
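# Examples for the pattern above (anchored with ^...$, so the whole string
# must match; only mobile prefixes 70-72 and 74-78 are accepted):
#
#   matches:   "0702343221", "94702343221", "+94702343221", "0094702343221",
#              "070-2343221", "070 2343221"
#   no match:  "0732343221"   (73 is not an allowed prefix)
#              "07023432211"  (more than seven trailing digits)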
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : List[Any] = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = "gptsan-japanese" _UpperCAmelCase = [ "past_key_values", ] _UpperCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Optional[Any] , UpperCamelCase: List[str]=3_60_00 , UpperCamelCase: List[str]=12_80 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Any=81_92 , UpperCamelCase: Dict=40_96 , UpperCamelCase: Optional[int]=1_28 , UpperCamelCase: Any=10 , UpperCamelCase: List[Any]=0 , UpperCamelCase: Dict=16 , UpperCamelCase: Tuple=16 , UpperCamelCase: Union[str, Any]=1_28 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: Union[str, Any]=1e-5 , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Dict="float32" , UpperCamelCase: Any=False , UpperCamelCase: Dict=False , UpperCamelCase: List[str]=False , UpperCamelCase: Union[str, Any]=0.002 , UpperCamelCase: int=False , UpperCamelCase: str=True , UpperCamelCase: Dict=3_59_98 , UpperCamelCase: Optional[Any]=3_59_95 , UpperCamelCase: Optional[Any]=3_59_99 , **UpperCamelCase: Optional[int] , ) -> Optional[int]: snake_case__ = vocab_size snake_case__ = max_position_embeddings snake_case__ = d_model snake_case__ = d_ff snake_case__ = d_ext snake_case__ = d_spout snake_case__ = num_switch_layers snake_case__ = num_ext_layers snake_case__ = num_switch_layers + num_ext_layers snake_case__ = num_heads snake_case__ = num_experts snake_case__ = expert_capacity snake_case__ = dropout_rate snake_case__ = layer_norm_epsilon snake_case__ = router_bias snake_case__ = router_jitter_noise snake_case__ = router_dtype snake_case__ = router_ignore_padding_tokens snake_case__ = output_hidden_states snake_case__ = output_attentions snake_case__ = initializer_factor snake_case__ = output_router_logits snake_case__ = use_cache super().__init__( separator_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType __UpperCamelCase : Any = get_logger(__name__) def a_ ( _A , _A , _A , _A , _A=0 ) -> Tuple: """simple docstring""" os.makedirs(_A , exist_ok=_A ) with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case__ = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case__ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' snake_case__ = os.path.join(_A , _A ) if accelerator.process_index == 0: logger.info(f'''Saving model to {output_model_file}''' ) torch.save(_A , _A ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case__ = ( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case__ = os.path.join(_A , _A ) logger.info(f'''Saving model to {output_model_file}''' ) torch.save(_A , _A ) logger.info(f'''Model saved to {output_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case__ = os.path.join(_A , f'''{MODEL_NAME}_{model_index}''' ) os.makedirs(_A , exist_ok=_A ) logger.info(f'''Saving model to {ckpt_dir}''' ) snake_case__ = {'model': state_dict} dist_cp.save_state_dict( state_dict=_A , storage_writer=dist_cp.FileSystemWriter(_A ) , planner=DefaultSavePlanner() , ) logger.info(f'''Model saved to {ckpt_dir}''' ) def a_ ( _A , _A , _A , _A , _A=0 ) -> Tuple: """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_A ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case__ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin''' snake_case__ = os.path.join(_A , _A ) logger.info(f'''Loading model from {input_model_file}''' ) snake_case__ = torch.load(_A ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case__ = ( f'''{MODEL_NAME}_rank{accelerator.process_index}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin''' ) snake_case__ = os.path.join(_A , _A ) logger.info(f'''Loading model from {input_model_file}''' ) snake_case__ = torch.load(_A ) logger.info(f'''Model loaded from {input_model_file}''' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case__ = ( os.path.join(_A , f'''{MODEL_NAME}_{model_index}''' ) if 
f'''{MODEL_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading model from {ckpt_dir}''' ) snake_case__ = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=_A , storage_reader=dist_cp.FileSystemReader(_A ) , planner=DefaultLoadPlanner() , ) snake_case__ = state_dict['model'] logger.info(f'''Model loaded from {ckpt_dir}''' ) model.load_state_dict(_A ) def a_ ( _A , _A , _A , _A , _A , _A=0 ) -> Any: """simple docstring""" os.makedirs(_A , exist_ok=_A ) with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case__ = FSDP.optim_state_dict(_A , _A ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case__ = ( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case__ = os.path.join(_A , _A ) logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' ) torch.save(_A , _A ) logger.info(f'''Optimizer state saved in {output_optimizer_file}''' ) else: snake_case__ = os.path.join(_A , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) os.makedirs(_A , exist_ok=_A ) logger.info(f'''Saving Optimizer state to {ckpt_dir}''' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(_A ) , planner=DefaultSavePlanner() , ) logger.info(f'''Optimizer state saved in {ckpt_dir}''' ) def a_ ( _A , _A , _A , _A , _A , _A=0 ) -> Optional[Any]: """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case__ = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case__ = ( f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin''' ) snake_case__ = os.path.join(_A , _A ) logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' ) snake_case__ = torch.load(_A ) logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' ) else: snake_case__ = ( os.path.join(_A , f'''{OPTIMIZER_NAME}_{optimizer_index}''' ) if f'''{OPTIMIZER_NAME}''' not in input_dir else input_dir ) logger.info(f'''Loading Optimizer from {ckpt_dir}''' ) snake_case__ = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(_A ) , ) snake_case__ = optim_state['optimizer'] logger.info(f'''Optimizer loaded from {ckpt_dir}''' ) snake_case__ = FSDP.optim_state_dict_to_load(_A , _A , _A ) optimizer.load_state_dict(_A )
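# Driving sketch for the four helpers above (all obfuscated to `a_` here; in
# upstream accelerate they are exposed as save_fsdp_model / load_fsdp_model /
# save_fsdp_optimizer / load_fsdp_optimizer and invoked from
# Accelerator.save_state / load_state):
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, 0)
#
# FULL_STATE_DICT writes one consolidated .bin from rank 0, LOCAL_STATE_DICT
# writes one file per rank, and SHARDED_STATE_DICT writes a
# torch.distributed.checkpoint directory per model/optimizer index.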
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) __UpperCamelCase : int = 299792458 # Symbols __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = symbols("""ct x y z""") def a_ ( _A ) -> float: """simple docstring""" if velocity > c: raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('Speed must be greater than or equal to 1!' ) return velocity / c def a_ ( _A ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(_A ) ** 2 ) def a_ ( _A ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(_A ), -gamma(_A ) * beta(_A ), 0, 0], [-gamma(_A ) * beta(_A ), gamma(_A ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def a_ ( _A , _A = None ) -> np.ndarray: """simple docstring""" # Ensure event is not empty if event is None: snake_case__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(_A ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: __UpperCamelCase : List[Any] = transform(29979245) print("""Example of four vector: """) print(f'''ct\' = {four_vector[0]}''') print(f'''x\' = {four_vector[1]}''') print(f'''y\' = {four_vector[2]}''') print(f'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values __UpperCamelCase : List[Any] = {ct: c, x: 1, y: 1, z: 1} __UpperCamelCase : Tuple = [four_vector[i].subs(sub_dict) for i in range(4)] print(f'''\n{numerical_vector}''')
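# Worked numbers for the helpers above (velocities are in m/s, as beta()
# requires):
#
#   v = 0.5 * c  ->  beta = 0.5,  gamma = 1 / sqrt(0.75) ~= 1.1547
#
# and the transformation matrix then acts as
#   ct' = gamma * (ct - beta * x)
#   x'  = gamma * (x - beta * ct)
# with y and z left unchanged; the mixing of ct and x is the upper-left 2x2
# block built by transformation_matrix().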
from typing import TYPE_CHECKING from ...utils import _LazyModule _import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byt5 import ByT5Tokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class __SCREAMING_SNAKE_CASE: def __init__( self: List[str] , UpperCamelCase: Dict , UpperCamelCase: Optional[int]=13 , UpperCamelCase: int=7 , UpperCamelCase: int=True , UpperCamelCase: Dict=True , UpperCamelCase: List[Any]=True , UpperCamelCase: List[str]=True , UpperCamelCase: Any=99 , UpperCamelCase: List[Any]=64 , UpperCamelCase: int=32 , UpperCamelCase: Optional[int]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: Union[str, Any]=37 , UpperCamelCase: Dict="gelu" , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Tuple=0.1 , UpperCamelCase: List[str]=5_12 , UpperCamelCase: Dict=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: Dict=0.02 , UpperCamelCase: List[str]=3 , UpperCamelCase: int=4 , UpperCamelCase: int=None , ) -> str: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = embedding_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None if self.use_token_type_ids: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: str ) -> str: return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[str] ) -> List[Any]: snake_case__ = MobileBertModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) snake_case__ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) snake_case__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ) -> List[Any]: snake_case__ = MobileBertForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: str , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: Optional[Any] ) -> Any: snake_case__ = MobileBertForNextSentencePrediction(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] ) -> List[str]: snake_case__ = MobileBertForPreTraining(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , next_sentence_label=UpperCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: str , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] ) -> Optional[int]: snake_case__ = MobileBertForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Dict , 
UpperCamelCase: Dict , UpperCamelCase: Tuple ) -> str: snake_case__ = self.num_labels snake_case__ = MobileBertForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: str ) -> List[Any]: snake_case__ = self.num_labels snake_case__ = MobileBertForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self: str , UpperCamelCase: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any ) -> Optional[int]: snake_case__ = self.num_choices snake_case__ = MobileBertForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { "feature-extraction": MobileBertModel, "fill-mask": MobileBertForMaskedLM, "question-answering": MobileBertForQuestionAnswering, "text-classification": MobileBertForSequenceClassification, "token-classification": MobileBertForTokenClassification, "zero-shot": MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int]=False ) -> int: snake_case__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class in get_values(UpperCamelCase ): snake_case__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase ) snake_case__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return 
inputs_dict def lowerCAmelCase_ ( self: List[str] ) -> Dict: snake_case__ = MobileBertModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> List[str]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCamelCase ) def lowerCAmelCase_ ( self: Any ) -> Any: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCamelCase ) def lowerCAmelCase_ ( self: str ) -> Optional[Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCamelCase ) def lowerCAmelCase_ ( self: Any ) -> Optional[int]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] ) -> List[str]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCamelCase ) def a_ ( _A ) -> Union[str, Any]: """simple docstring""" return torch.tensor( _A , dtype=torch.long , device=_A , ) __UpperCamelCase : str = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @slow def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: snake_case__ = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(UpperCamelCase ) snake_case__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): snake_case__ = model(UpperCamelCase )[0] snake_case__ = torch.Size((1, 9, 5_12) ) self.assertEqual(output.shape , UpperCamelCase ) snake_case__ = torch.tensor( [ [ [-2.4736526e07, 8.2691656e04, 1.6521838e05], [-5.7541704e-01, 3.9056022e00, 4.4011507e00], [2.6047359e00, 1.5677652e00, -1.7324188e-01], ] ] , device=UpperCamelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE snake_case__ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) snake_case__ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
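# Why the integration test above checks a ratio instead of torch.allclose:
# MobileBERT activations span roughly 1e0 to 1e8, so any fixed absolute
# tolerance is meaningless for the large entries. Dividing expected by actual
# normalizes every entry to ~1.0, and the test then only asserts
#   1 - TOLERANCE <= expected / actual <= 1 + TOLERANCE
# elementwise, i.e. a relative error below 0.1% regardless of magnitude.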
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : int = {"""vocab_file""": """spiece.model"""} __UpperCamelCase : Any = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", } } # TODO(PVP) - this should be removed in Transformers v5 __UpperCamelCase : Tuple = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } __UpperCamelCase : Optional[Any] = """▁""" class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) snake_case__ = legacy snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , ) snake_case__ = vocab_file snake_case__ = extra_ids snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) @staticmethod def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any: if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , ) return max_model_length @property def lowerCAmelCase_ ( self: Tuple ) -> List[str]: return self.sp_model.get_piece_size() + self._extra_ids def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase )) + [1] return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return list( set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple: return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()] def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[int] ) -> List[int]: if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) if token_ids_a is None: return token_ids_a else: snake_case__ = self._add_eos_if_not_present(UpperCamelCase ) return token_ids_a + token_ids_a def __getstate__( self: Union[str, Any] ) -> List[str]: snake_case__ = self.__dict__.copy() snake_case__ = None return state def __setstate__( self: Optional[int] , UpperCamelCase: int ) -> List[str]: snake_case__ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case__ = {} snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self: str , UpperCamelCase: "TextInput" , **UpperCamelCase: Dict ) -> List[str]: # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: snake_case__ = SPIECE_UNDERLINE + text.replace(UpperCamelCase , ' ' ) return super().tokenize(UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , **UpperCamelCase: str ) -> str: if not self.legacy: snake_case__ = text.startswith(UpperCamelCase ) if is_first: snake_case__ = text[1:] snake_case__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(UpperCamelCase ): snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] ) -> Dict: if token.startswith('<extra_id_' ): snake_case__ = re.match(R'<extra_id_(\d+)>' , UpperCamelCase ) snake_case__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(UpperCamelCase ) def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> Tuple: if index < self.sp_model.get_piece_size(): snake_case__ = self.sp_model.IdToPiece(UpperCamelCase ) else: snake_case__ = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> Dict: snake_case__ = [] snake_case__ = '' snake_case__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase ) + token snake_case__ = True snake_case__ = [] else: current_sub_tokens.append(UpperCamelCase ) snake_case__ = False out_string += self.sp_model.decode(UpperCamelCase ) return out_string.strip() def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ = os.path.join( UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) 
elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , 'wb' ) as fi: snake_case__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,)
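# Sentinel-token sketch for the tokenizer above. With the default
# extra_ids=100, the vocabulary is extended with <extra_id_0> ... <extra_id_99>,
# which are mapped to the top of the id space via `vocab_size - num - 1`:
#
#   tok = T5Tokenizer.from_pretrained("t5-small")   # sp vocab 32000 + 100 extras
#   tok.convert_tokens_to_ids("<extra_id_0>")       # -> 32099
#   tok.convert_tokens_to_ids("<extra_id_1>")       # -> 32098
#
# so <extra_id_0> is the highest id, as expected by T5's span-corruption
# pretraining format.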
from __future__ import annotations def a_ ( _A ) -> list[int]: """simple docstring""" snake_case__ = [True] * limit snake_case__ = False snake_case__ = False snake_case__ = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): snake_case__ = i * 2 while index < limit: snake_case__ = False snake_case__ = index + i snake_case__ = [2] for i in range(3 , _A , 2 ): if is_prime[i]: primes.append(_A ) return primes def a_ ( _A = 1000000 ) -> int: """simple docstring""" snake_case__ = prime_sieve(_A ) snake_case__ = 0 snake_case__ = 0 for i in range(len(_A ) ): for j in range(i + length , len(_A ) ): snake_case__ = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: snake_case__ = j - i snake_case__ = sol return largest if __name__ == "__main__": print(f'''{solution() = }''')
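# The problem being solved (Project Euler 50): find the prime below the
# ceiling expressible as the sum of the most consecutive primes. Small case:
#
#   2 + 3 + 5 + 7 + 11 + 13 = 41, which is prime
#
# so below 100 the longest run has six terms and the answer is 41. solution()
# runs the same scan below 1_000_000, where the known answer is 997651, the
# sum of the 543 consecutive primes starting at 7.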
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __SCREAMING_SNAKE_CASE: def __init__( self: int , UpperCamelCase: List[str] , UpperCamelCase: str=13 , UpperCamelCase: int=7 , UpperCamelCase: Any=True , UpperCamelCase: Dict=True , UpperCamelCase: Dict=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=99 , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Union[str, Any]=4 , UpperCamelCase: List[str]=37 , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Union[str, Any]=5_12 , UpperCamelCase: str=16 , UpperCamelCase: int=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Dict=4 , UpperCamelCase: List[str]=None , ) -> List[str]: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: List[str] ) -> Dict: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None if self.use_token_type_ids: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: 
List[str] , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: str ) -> Dict: snake_case__ = LlamaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) snake_case__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , ) -> str: snake_case__ = True snake_case__ = LlamaModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , ) snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , ) -> Any: snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str] , ) -> Union[str, Any]: snake_case__ = True snake_case__ = True snake_case__ = LlamaForCausalLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # first forward pass snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , use_cache=UpperCamelCase , ) snake_case__ = outputs.past_key_values # create hypothetical multiple next tokens and extend them to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and attention mask snake_case__ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case__ = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , output_hidden_states=UpperCamelCase , )['hidden_states'][0] # select random slice snake_case__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case__ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ = output_from_past[:, :,
random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) ) def lowerCAmelCase_ ( self: int ) -> Dict: snake_case__ = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , a_ , unittest.TestCase ): _UpperCAmelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _UpperCAmelCase = (LlamaForCausalLM,) if is_torch_available() else () _UpperCAmelCase = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = LlamaModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: int ) -> int: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> str: snake_case__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'single_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = 3 snake_case__ = 'multi_label_classification' snake_case__ = input_dict['input_ids'] snake_case__ = input_ids.ne(1 ).to(UpperCamelCase ) snake_case__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case__ = LlamaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def lowerCAmelCase_ ( self: Dict ) -> Any: pass @parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[Any] ) -> List[str]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ = ids_tensor([1, 10] , config.vocab_size ) snake_case__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = LlamaModel(UpperCamelCase ) original_model.to(UpperCamelCase ) original_model.eval() snake_case__ = original_model(UpperCamelCase ).last_hidden_state snake_case__ = original_model(UpperCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights snake_case__ = {'type': scaling_type, 'factor': 10.0} snake_case__ = LlamaModel(UpperCamelCase ) scaled_model.to(UpperCamelCase ) scaled_model.eval() snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state snake_case__ = scaled_model(UpperCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-5 ) ) @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> str: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!'
) @slow def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def lowerCAmelCase_ ( self: int ) -> List[Any]: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) # Expected mean on dim = -1 snake_case__ = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabilities somehow, will update!
Also it is gonna be a `too_slow` test' ) @slow def lowerCAmelCase_ ( self: List[str] ) -> Tuple: snake_case__ = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] snake_case__ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) snake_case__ = model(torch.tensor(UpperCamelCase ) ) snake_case__ = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # fmt: off snake_case__ = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is currently gated' ) @slow def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]: snake_case__ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' snake_case__ = 'Simply put, the theory of relativity states that ' snake_case__ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) snake_case__ = tokenizer.encode(UpperCamelCase , return_tensors='pt' ) snake_case__ = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=UpperCamelCase ) # greedy generation outputs snake_case__ = model.generate(UpperCamelCase , max_new_tokens=64 , top_p=UpperCamelCase , temperature=1 , do_sample=UpperCamelCase ) snake_case__ = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase )
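# A compact sketch of the KV-cache equivalence check performed by the
# decoder-past test in the sample above (names below are illustrative, not
# the test's own): the decoder runs once on the full sequence without a
# cache and once on only the new tokens with the prefix's past_key_values,
# and the hidden states for the new positions must agree within atol=1e-3.
#
#   full = model(torch.cat([prefix_ids, next_ids], dim=-1)).hidden_states[-1]
#   cached = model(next_ids, past_key_values=past).hidden_states[-1]
#   assert torch.allclose(full[:, -3:, :], cached, atol=1e-3)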
307
1
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed __UpperCamelCase : List[str] = logging.getLogger(__name__) def a_ ( _A=2 , _A=3 , _A=16 , _A = 10 , _A = 2 ) -> List[str]: """simple docstring""" def get_dataset(_A ): snake_case__ = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(_A , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) snake_case__ = get_dataset(_A ) snake_case__ = get_dataset(_A ) snake_case__ = DataLoader(_A , shuffle=_A , batch_size=_A , num_workers=4 ) snake_case__ = DataLoader(_A , shuffle=_A , batch_size=_A , num_workers=4 ) return (train_dataloader, valid_dataloader) def a_ ( _A , _A , _A , _A , _A , _A=None ) -> List[Any]: """simple docstring""" snake_case__ = [] for epoch in range(_A ): # Train quickly model.train() for batch in dataloader: snake_case__ , snake_case__ = batch snake_case__ = model(_A ) snake_case__ = torch.nn.functional.mse_loss(_A , _A ) accelerator.backward(_A ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class __SCREAMING_SNAKE_CASE( nn.Module ): def __init__( self: List[Any] ) -> Union[str, Any]: super().__init__() snake_case__ = nn.Parameter(torch.randn(1 ) ) snake_case__ = nn.Parameter(torch.randn(1 ) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: str ) -> int: return x * self.a + self.b class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: List[Any] ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ = dummy_dataloaders() snake_case__ = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase , automatic_checkpoint_naming=UpperCamelCase ) # Train baseline snake_case__ = Accelerator(project_config=UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ = dummy_dataloaders() # Train baseline snake_case__ = Accelerator() snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save initial snake_case__ = os.path.join(UpperCamelCase , 'initial' ) accelerator.save_state(UpperCamelCase ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() snake_case__ = train(3 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() # Train partially set_seed(42 ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , 
snake_case__ = dummy_dataloaders() snake_case__ = Accelerator() snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) accelerator.load_state(UpperCamelCase ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) snake_case__ = train(2 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save everything snake_case__ = os.path.join(UpperCamelCase , 'checkpoint' ) accelerator.save_state(UpperCamelCase ) # Load everything back in and make sure all states work accelerator.load_state(UpperCamelCase ) test_rands += train(1 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ = dummy_dataloaders() snake_case__ = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase ) # Train baseline snake_case__ = Accelerator(project_dir=UpperCamelCase , project_config=UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save initial accelerator.save_state() ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() snake_case__ = train(3 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() # Train partially set_seed(42 ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ , snake_case__ = dummy_dataloaders() snake_case__ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase ) snake_case__ = Accelerator(project_dir=UpperCamelCase , project_config=UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) accelerator.load_state(os.path.join(UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) snake_case__ = train(2 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((snake_case__) , (snake_case__)) = model.a.item(), model.b.item() snake_case__ = optimizer.state_dict() self.assertEqual(UpperCamelCase , UpperCamelCase ) 
self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple ) -> Any: snake_case__ = torch.tensor([1, 2, 3] ) snake_case__ = torch.tensor([2, 3, 4] ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(net.parameters() ) snake_case__ = Accelerator() with self.assertRaises(UpperCamelCase ) as ve: accelerator.register_for_checkpointing(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def lowerCAmelCase_ ( self: List[Any] ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) snake_case__ = DummyModel() snake_case__ = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) snake_case__ = torch.optim.lr_scheduler.StepLR(UpperCamelCase , step_size=1 , gamma=0.99 ) snake_case__ , snake_case__ = dummy_dataloaders() snake_case__ = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase ) # Train baseline snake_case__ = Accelerator(project_dir=UpperCamelCase , project_config=UpperCamelCase ) snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save initial accelerator.save_state() snake_case__ = scheduler.state_dict() train(3 , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) self.assertNotEqual(UpperCamelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(UpperCamelCase , scheduler.state_dict() ) def lowerCAmelCase_ ( self: List[Any] ) -> Dict: with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) snake_case__ = DummyModel() snake_case__ = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase , total_limit=2 ) # Train baseline snake_case__ = Accelerator(project_dir=UpperCamelCase , project_config=UpperCamelCase ) snake_case__ = accelerator.prepare(UpperCamelCase ) # Save 11 states; with total_limit=2 only the two most recent should remain: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case__ = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": __UpperCamelCase : Tuple = """/tmp/accelerate/state_checkpointing""" __UpperCamelCase : Dict = DummyModel() __UpperCamelCase : Dict = torch.optim.Adam(params=model.parameters(), lr=1E-3) __UpperCamelCase : Any = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) __UpperCamelCase , __UpperCamelCase : int = dummy_dataloaders() __UpperCamelCase : int = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline __UpperCamelCase : int = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir):
shutil.rmtree(savedir) os.makedirs(savedir) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) __UpperCamelCase , __UpperCamelCase : Optional[int] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: __UpperCamelCase : List[str] = group["""params"""][0].device break assert param_device.type == accelerator.device.type __UpperCamelCase : Tuple = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: __UpperCamelCase : List[str] = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: __UpperCamelCase : int = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
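# The rotation behaviour exercised in the sample above follows from
# ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2):
# each save_state() call writes checkpoints/checkpoint_{i} and prunes the
# oldest directories beyond the limit, which is why, after eleven saves,
# only checkpoint_9 and checkpoint_10 are asserted to exist.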
307
from math import isclose, sqrt def a_ ( _A , _A , _A ) -> tuple[float, float, float]: """simple docstring""" snake_case__ = point_y / 4 / point_x snake_case__ = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) snake_case__ = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) snake_case__ = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 snake_case__ = outgoing_gradient**2 + 4 snake_case__ = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) snake_case__ = (point_y - outgoing_gradient * point_x) ** 2 - 100 snake_case__ = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) snake_case__ = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point snake_case__ = x_minus if isclose(_A , _A ) else x_plus snake_case__ = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a_ ( _A = 1.4 , _A = -9.6 ) -> int: """simple docstring""" snake_case__ = 0 snake_case__ = first_x_coord snake_case__ = first_y_coord snake_case__ = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): snake_case__ , snake_case__ , snake_case__ = next_point(_A , _A , _A ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
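# Why the normal gradient above is point_y / 4 / point_x (a short derivation,
# not part of the original file): the boundary is the ellipse
# 4x^2 + y^2 = 100, so implicit differentiation gives 8x + 2y*(dy/dx) = 0,
# i.e. a tangent gradient of -4x/y; the normal is its negative reciprocal,
# y/(4x). The (sa, ca) pair then encodes the double angle of the normal
# direction so the incoming gradient can be reflected about it.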
307
1
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def a_ ( _A , _A , _A = "x" , _A = 10**-10 , _A = 1 , ) -> complex: """simple docstring""" snake_case__ = symbols(_A ) snake_case__ = lambdify(_A , _A ) snake_case__ = lambdify(_A , diff(_A , _A ) ) snake_case__ = starting_point while True: if diff_function(_A ) != 0: snake_case__ = prev_guess - multiplicity * func(_A ) / diff_function( _A ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess snake_case__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial # Find fourth Root of 5 print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''') # Find value of e print( """The root of log(y) - 1 = 0 is """, f'''{newton_raphson('log(y) - 1', 2, variable='y')}''', ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", f'''{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}''', ) # Find root of cos(x) print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
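# A note on the `multiplicity` parameter above: the update implemented is the
# modified Newton iteration x_{n+1} = x_n - m * f(x_n) / f'(x_n), which
# restores quadratic convergence at a root of known multiplicity m (the
# plain m = 1 iteration converges only linearly at a multiple root).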
307
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class __SCREAMING_SNAKE_CASE( TensorFormatter[Mapping, "torch.Tensor", Mapping] ): def __init__( self: Any , UpperCamelCase: Optional[int]=None , **UpperCamelCase: Union[str, Any] ) -> int: super().__init__(features=UpperCamelCase ) snake_case__ = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCAmelCase_ ( self: Any , UpperCamelCase: Any ) -> List[str]: import torch if isinstance(UpperCamelCase , UpperCamelCase ) and column: if all( isinstance(UpperCamelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(UpperCamelCase ) return column def lowerCAmelCase_ ( self: str , UpperCamelCase: Dict ) -> Union[str, Any]: import torch if isinstance(UpperCamelCase , (str, bytes, type(UpperCamelCase )) ): return value elif isinstance(UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() snake_case__ = {} if isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): snake_case__ = {'dtype': torch.intaa} elif isinstance(UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): snake_case__ = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCamelCase , PIL.Image.Image ): snake_case__ = np.asarray(UpperCamelCase ) return torch.tensor(UpperCamelCase , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: str ) -> Any: import torch # support for torch, tf, jax etc. 
if hasattr(UpperCamelCase , '__array__' ) and not isinstance(UpperCamelCase , torch.Tensor ): snake_case__ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCamelCase , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] ) elif isinstance(UpperCamelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] ) return self._tensorize(UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: dict ) -> List[str]: return map_nested(self._recursive_tensorize , UpperCamelCase , map_list=UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: pa.Table ) -> Mapping: snake_case__ = self.numpy_arrow_extractor().extract_row(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_row(UpperCamelCase ) return self.recursive_tensorize(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: pa.Table ) -> "torch.Tensor": snake_case__ = self.numpy_arrow_extractor().extract_column(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_column(UpperCamelCase , pa_table.column_names[0] ) snake_case__ = self.recursive_tensorize(UpperCamelCase ) snake_case__ = self._consolidate(UpperCamelCase ) return column def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: pa.Table ) -> Mapping: snake_case__ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase ) snake_case__ = self.python_features_decoder.decode_batch(UpperCamelCase ) snake_case__ = self.recursive_tensorize(UpperCamelCase ) for column_name in batch: snake_case__ = self._consolidate(batch[column_name] ) return batch
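# A minimal usage sketch (my own example, not from the file above): this
# formatter is what backs `Dataset.set_format("torch")` in the `datasets`
# library, so integer columns come back as int64 tensors and float columns
# as float32 tensors, matching the default dtypes chosen in the tensorize
# method.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]})
ds.set_format("torch")
print(ds[0]["x"], ds[0]["x"].dtype)  # tensor([1, 2]) torch.int64
print(ds[0]["y"].dtype)              # torch.float32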
307
1
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: List[Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any: snake_case__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png' ) snake_case__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' ) snake_case__ = 'xvjiarui/stable-diffusion-2-inpainting' snake_case__ , snake_case__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase , safety_checker=UpperCamelCase ) snake_case__ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case__ = jax.random.PRNGKey(0 ) snake_case__ = 50 snake_case__ = jax.device_count() snake_case__ = num_samples * [prompt] snake_case__ = num_samples * [init_image] snake_case__ = num_samples * [mask_image] snake_case__ , snake_case__ , snake_case__ = pipeline.prepare_inputs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # shard inputs and rng snake_case__ = replicate(UpperCamelCase ) snake_case__ = jax.random.split(UpperCamelCase , jax.device_count() ) snake_case__ = shard(UpperCamelCase ) snake_case__ = shard(UpperCamelCase ) snake_case__ = shard(UpperCamelCase ) snake_case__ = pipeline( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ) snake_case__ = output.images.reshape(UpperCamelCase , 5_12 , 5_12 , 3 ) snake_case__ = images[0, 2_53:2_56, 2_53:2_56, -1] snake_case__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case__ = jnp.array( [0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
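# A note on the data-parallel layout in the test above (descriptive, not
# extra test code): `replicate` copies the params pytree onto every local
# device, while `shard` reshapes a leading batch axis of size
# num_devices * per_device into (num_devices, per_device, ...); with
# num_samples == jax.device_count(), each device therefore denoises exactly
# one prompt/image/mask triple when the pipeline is called with jit=True.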
307
import doctest from collections import deque import numpy as np class __SCREAMING_SNAKE_CASE: def __init__( self: Dict ) -> None: snake_case__ = [2, 1, 2, -1] snake_case__ = [1, 2, 3, 4] def lowerCAmelCase_ ( self: List[str] ) -> list[float]: snake_case__ = len(self.first_signal ) snake_case__ = len(self.second_signal ) snake_case__ = max(UpperCamelCase , UpperCamelCase ) # create a zero matrix of max_length x max_length snake_case__ = [[0] * max_length for i in range(UpperCamelCase )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(UpperCamelCase ): snake_case__ = deque(self.second_signal ) rotated_signal.rotate(UpperCamelCase ) for j, item in enumerate(UpperCamelCase ): matrix[i][j] += item # multiply the matrix with the first signal snake_case__ = np.matmul(np.transpose(UpperCamelCase ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(UpperCamelCase , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
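# A cross-check of the matrix method above, assuming NumPy's FFT is
# available: circular convolution can equivalently be computed in the
# frequency domain, and for the default signals both routes should give
# [10.0, 10.0, 6.0, 14.0] (my own verification, not part of the original
# file).
import numpy as np

first_signal = [2, 1, 2, -1]
second_signal = [1, 2, 3, 4]
spectral = np.real(np.fft.ifft(np.fft.fft(first_signal) * np.fft.fft(second_signal)))
print([round(float(v), 2) for v in spectral])  # [10.0, 10.0, 6.0, 14.0]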
307
1
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __SCREAMING_SNAKE_CASE( a_ ): def __init__( self: Tuple , UpperCamelCase: str , UpperCamelCase: List[Any]=13 , UpperCamelCase: Tuple=7 , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=False , UpperCamelCase: Any=True , UpperCamelCase: str=99 , UpperCamelCase: Optional[int]=32 , UpperCamelCase: Optional[Any]=5 , UpperCamelCase: Any=4 , UpperCamelCase: int=37 , UpperCamelCase: Any="gelu" , UpperCamelCase: str=0.1 , UpperCamelCase: Dict=0.1 , UpperCamelCase: Dict=5_12 , UpperCamelCase: str=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: Optional[int]=0.02 , UpperCamelCase: Tuple=3 , UpperCamelCase: Any=4 , UpperCamelCase: List[Any]=None , ) -> List[str]: snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_input_mask snake_case__ = use_token_type_ids snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = type_vocab_size snake_case__ = type_sequence_label_size snake_case__ = initializer_range snake_case__ = num_labels snake_case__ = num_choices snake_case__ = scope def lowerCAmelCase_ ( self: List[str] ) -> int: snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ = None if self.use_input_mask: snake_case__ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ = None snake_case__ = None snake_case__ = None if self.use_labels: snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self: Any ) -> int: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Tuple ) -> Optional[int]: snake_case__ = DistilBertModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , UpperCamelCase ) snake_case__ = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: Optional[int] ) -> List[Any]: snake_case__ = DistilBertForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] ) -> Any: snake_case__ = DistilBertForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any] ) -> List[str]: snake_case__ = self.num_labels snake_case__ = DistilBertForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Any ) -> int: snake_case__ = self.num_labels snake_case__ = DistilBertForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[int] ) -> str: snake_case__ = self.num_choices snake_case__ = DistilBertForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() snake_case__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ = model( UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self: str ) -> List[str]: snake_case__ = self.prepare_config_and_inputs() ((snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__) , (snake_case__)) = config_and_inputs snake_case__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE( a_ , a_ , unittest.TestCase ): _UpperCAmelCase = ( ( DistilBertModel, 
DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _UpperCAmelCase = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True def lowerCAmelCase_ ( self: Dict ) -> int: snake_case__ = DistilBertModelTester(self ) snake_case__ = ConfigTester(self , config_class=UpperCamelCase , dim=37 ) def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]: self.config_tester.run_common_tests() def lowerCAmelCase_ ( self: List[Any] ) -> int: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] ) -> Dict: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCamelCase ) def lowerCAmelCase_ ( self: Tuple ) -> List[Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Dict: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCamelCase ) def lowerCAmelCase_ ( self: Any ) -> Union[str, Any]: snake_case__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCamelCase ) @slow def lowerCAmelCase_ ( self: int ) -> str: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ = DistilBertModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow @require_torch_gpu def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return snake_case__ = True snake_case__ = model_class(config=UpperCamelCase ) snake_case__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) snake_case__ = torch.jit.trace( UpperCamelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCamelCase , os.path.join(UpperCamelCase , 'traced_model.pt' ) ) snake_case__ = torch.jit.load(os.path.join(UpperCamelCase , 'traced_model.pt' ) , map_location=UpperCamelCase ) loaded(inputs_dict['input_ids'].to(UpperCamelCase ) , inputs_dict['attention_mask'].to(UpperCamelCase ) ) @require_torch class __SCREAMING_SNAKE_CASE( unittest.TestCase ): @slow def lowerCAmelCase_ ( self: Dict ) -> List[str]: snake_case__ = DistilBertModel.from_pretrained('distilbert-base-uncased' ) snake_case__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) snake_case__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] snake_case__ = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , UpperCamelCase ) snake_case__ = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
307
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a_ ( _A , _A=0.999 , _A="cosine" , ) -> Optional[int]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_A ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_A ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' ) snake_case__ = [] for i in range(_A ): snake_case__ = i / num_diffusion_timesteps snake_case__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ) , _A ) ) return torch.tensor(_A , dtype=torch.floataa ) class __SCREAMING_SNAKE_CASE( a_ , a_ ): _UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers] _UpperCAmelCase = 2 @register_to_config def __init__( self: Dict , UpperCamelCase: int = 10_00 , UpperCamelCase: float = 0.00_085 , UpperCamelCase: float = 0.012 , UpperCamelCase: str = "linear" , UpperCamelCase: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase: str = "epsilon" , UpperCamelCase: Optional[bool] = False , UpperCamelCase: Optional[bool] = False , UpperCamelCase: float = 1.0 , UpperCamelCase: str = "linspace" , UpperCamelCase: int = 0 , ) -> str: if trained_betas is not None: snake_case__ = torch.tensor(UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": snake_case__ = torch.linspace(UpperCamelCase , UpperCamelCase , UpperCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. snake_case__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='cosine' ) elif beta_schedule == "exp": snake_case__ = betas_for_alpha_bar(UpperCamelCase , alpha_transform_type='exp' ) else: raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' ) snake_case__ = 1.0 - self.betas snake_case__ = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(UpperCamelCase , UpperCamelCase , UpperCamelCase ) snake_case__ = use_karras_sigmas def lowerCAmelCase_ ( self: str , UpperCamelCase: int , UpperCamelCase: Optional[int]=None ) -> str: if schedule_timesteps is None: snake_case__ = self.timesteps snake_case__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g.
for image-to-image) if len(self._index_counter ) == 0: snake_case__ = 1 if len(UpperCamelCase ) > 1 else 0 else: snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep snake_case__ = self._index_counter[timestep_int] return indices[pos].item() @property def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: snake_case__ = self.index_for_timestep(UpperCamelCase ) snake_case__ = self.sigmas[step_index] snake_case__ = sample / ((sigma**2 + 1) ** 0.5) return sample def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: int , UpperCamelCase: Union[str, torch.device] = None , UpperCamelCase: Optional[int] = None , ) -> str: snake_case__ = num_inference_steps snake_case__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": snake_case__ = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase , dtype=UpperCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": snake_case__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(0 , UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": snake_case__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 snake_case__ = (np.arange(UpperCamelCase , 0 , -step_ratio )).round().copy().astype(UpperCamelCase ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) snake_case__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) snake_case__ = np.log(UpperCamelCase ) snake_case__ = np.interp(UpperCamelCase , np.arange(0 , len(UpperCamelCase ) ) , UpperCamelCase ) if self.config.use_karras_sigmas: snake_case__ = self._convert_to_karras(in_sigmas=UpperCamelCase , num_inference_steps=self.num_inference_steps ) snake_case__ = np.array([self._sigma_to_t(UpperCamelCase , UpperCamelCase ) for sigma in sigmas] ) snake_case__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) snake_case__ = torch.from_numpy(UpperCamelCase ).to(device=UpperCamelCase ) snake_case__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) snake_case__ = torch.from_numpy(UpperCamelCase ) snake_case__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(UpperCamelCase ).startswith('mps' ): # mps does not support float64 snake_case__ = timesteps.to(UpperCamelCase , dtype=torch.floataa ) else: snake_case__ = timesteps.to(device=UpperCamelCase ) # empty dt and derivative snake_case__ = None snake_case__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter snake_case__ = defaultdict(UpperCamelCase ) def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Dict ) -> Tuple: # get log sigma snake_case__ = np.log(UpperCamelCase ) # get distribution snake_case__ = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range snake_case__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) snake_case__ = low_idx + 1 snake_case__ = log_sigmas[low_idx] snake_case__ = log_sigmas[high_idx] # interpolate sigmas snake_case__ = (low - log_sigma) / (low - high) snake_case__ = np.clip(UpperCamelCase , 0 , 1 ) # transform interpolation to time range snake_case__ = (1 - w) * low_idx + w * high_idx snake_case__ = t.reshape(sigma.shape ) return t def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: torch.FloatTensor , UpperCamelCase: Dict ) -> torch.FloatTensor: snake_case__ = in_sigmas[-1].item() snake_case__ = in_sigmas[0].item() snake_case__ = 7.0 # 7.0 is the value used in the paper snake_case__ = np.linspace(0 , 1 , UpperCamelCase ) snake_case__ = sigma_min ** (1 / rho) snake_case__ = sigma_max ** (1 / rho) snake_case__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]: return self.dt is None def lowerCAmelCase_ ( self: int , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: Union[float, torch.FloatTensor] , UpperCamelCase: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase: bool = True , ) -> Union[SchedulerOutput, Tuple]: snake_case__ = self.index_for_timestep(UpperCamelCase ) # advance index counter by 1 snake_case__ = timestep.cpu().item() if torch.is_tensor(UpperCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: snake_case__ = self.sigmas[step_index] snake_case__ = self.sigmas[step_index + 1] else: # 2nd order / Heun's method snake_case__ = self.sigmas[step_index - 1] snake_case__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API snake_case__ = 0 snake_case__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": snake_case__ = sigma_hat if self.state_in_first_order else sigma_next snake_case__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": snake_case__ = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.config.clip_sample: snake_case__ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order snake_case__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep snake_case__ = sigma_next - sigma_hat # store for 2nd order step snake_case__ = derivative snake_case__ = dt snake_case__ = sample else: # 2. 2nd order / Heun's method snake_case__ = (sample - pred_original_sample) / sigma_next snake_case__ = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample snake_case__ = self.dt snake_case__ = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" snake_case__ = None snake_case__ = None snake_case__ = None snake_case__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , UpperCamelCase: torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples snake_case__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase ): # mps does not support float64 snake_case__ = self.timesteps.to(original_samples.device , dtype=torch.floataa ) snake_case__ = timesteps.to(original_samples.device , dtype=torch.floataa ) else: snake_case__ = self.timesteps.to(original_samples.device ) snake_case__ = timesteps.to(original_samples.device ) snake_case__ = [self.index_for_timestep(UpperCamelCase , UpperCamelCase ) for t in timesteps] snake_case__ = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): snake_case__ = sigma.unsqueeze(-1 ) snake_case__ = original_samples + noise * sigma return noisy_samples def __len__( self: List[Any] ) -> Union[str, Any]: return self.config.num_train_timesteps
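# The two-phase `step` logic above is plain Heun integration; stripped of
# the scheduler bookkeeping it reduces to the sketch below (illustrative
# names, not the diffusers API):
def heun_step(f, x, t, dt):
    k1 = f(x, t)                     # slope at the current point (1st order)
    x_pred = x + dt * k1             # provisional Euler step
    k2 = f(x_pred, t + dt)           # slope at the predicted point
    return x + dt * 0.5 * (k1 + k2)  # average the two slopes (2nd order)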
import functools


def a_(days: list[int], costs: list[int]) -> int:
    """simple docstring"""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
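# Hedged usage sketch for the ticket-cost solver above (the classic
# "minimum cost for tickets" problem): days are the travel days within one
# year, costs are the 1-day, 7-day and 30-day pass prices. The input below is
# the standard LeetCode 983 example, whose optimal answer is 11.
if __name__ == "__main__":
    travel_days = [1, 4, 6, 7, 8, 20]
    pass_costs = [2, 7, 15]
    print(a_(travel_days, pass_costs))  # 11: a 7-day pass plus two 1-day passes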
from typing import TYPE_CHECKING from ..utils import _LazyModule __UpperCamelCase : Tuple = { """config""": [ """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""", """OnnxConfig""", """OnnxConfigWithPast""", """OnnxSeq2SeqConfigWithPast""", """PatchingSpec""", ], """convert""": ["""export""", """validate_model_outputs"""], """features""": ["""FeaturesManager"""], """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def solution(n: int = 600851475143) -> int:
    """simple docstring"""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f'''{solution() = }''')
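# Doctest-style worked examples for solution() above (Project Euler 3).
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29; the default
# argument is the original Euler input, whose answer is 6857.
# >>> solution(13195)
# 29
# >>> solution()
# 6857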
def xnor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """simple docstring"""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """simple docstring"""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __UpperCamelCase : int = imread(R"""digital_image_processing/image_data/lena_small.jpg""") __UpperCamelCase : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = cn.convert_to_negative(_A ) # assert negative_img array for at least one True assert negative_img.any() def a_ ( ) -> int: """simple docstring""" with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(_A , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def a_ ( ) -> List[str]: """simple docstring""" snake_case__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def a_ ( ) -> Dict: """simple docstring""" snake_case__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() snake_case__ = canny.canny(_A ) # assert canny array for at least one True assert canny_array.any() def a_ ( ) -> Optional[int]: """simple docstring""" assert gg.gaussian_filter(_A , 5 , sigma=0.9 ).all() def a_ ( ) -> Optional[Any]: """simple docstring""" # laplace diagonals snake_case__ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) snake_case__ = conv.img_convolve(_A , _A ).astype(_A ) assert res.any() def a_ ( ) -> Dict: """simple docstring""" assert med.median_filter(_A , 3 ).any() def a_ ( ) -> Dict: """simple docstring""" snake_case__ , snake_case__ = sob.sobel_filter(_A ) assert grad.any() and theta.any() def a_ ( ) -> Union[str, Any]: """simple docstring""" snake_case__ = sp.make_sepia(_A , 20 ) assert sepia.all() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[int]: """simple docstring""" snake_case__ = bs.Burkes(imread(_A , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def a_ ( _A = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]: """simple docstring""" snake_case__ = rs.NearestNeighbour(imread(_A , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def a_ ( ) -> Any: """simple docstring""" snake_case__ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. 
snake_case__ = imread(_A , 0 ) # Test for get_neighbors_pixel function() return not None snake_case__ = 0 snake_case__ = 0 snake_case__ = image[x_coordinate][y_coordinate] snake_case__ = lbp.get_neighbors_pixel( _A , _A , _A , _A ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image snake_case__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): snake_case__ = lbp.local_binary_value(_A , _A , _A ) assert lbp_image.any()
__UpperCamelCase : Any = [0, 2, 4, 6, 8] __UpperCamelCase : Optional[int] = [1, 3, 5, 7, 9] def a_ ( _A , _A , _A , _A ) -> int: """simple docstring""" if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 snake_case__ = 0 for digit in range(10 ): snake_case__ = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _A , _A ) return result snake_case__ = 0 for digita in range(10 ): snake_case__ = digita if (remainder + digita) % 2 == 0: snake_case__ = ODD_DIGITS else: snake_case__ = EVEN_DIGITS for digita in other_parity_digits: snake_case__ = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _A , _A , ) return result def a_ ( _A = 9 ) -> int: """simple docstring""" snake_case__ = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(_A , 0 , [0] * length , _A ) return result if __name__ == "__main__": print(f'''{solution() = }''')
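# Hedged brute-force cross-check for the digit-combinatorics count above
# (Project Euler 145): n is reversible when n + reverse(n) consists entirely
# of odd digits and n does not end in 0 (no leading zeros after reversal).
# This checker is a sketch for validating small powers, not part of the
# original solution; the problem statement gives 120 reversible numbers
# below one thousand.
def _brute_force_reversible(max_power: int) -> int:
    count = 0
    for n in range(1, 10**max_power):
        if n % 10 == 0:
            continue  # reversing would produce a leading zero
        if all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
            count += 1
    return count


if __name__ == "__main__":
    assert _brute_force_reversible(3) == 120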
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser __UpperCamelCase : List[str] = re.compile(R"""\s+""") def a_ ( _A ) -> Union[str, Any]: """simple docstring""" return {"hash": hashlib.mda(re.sub(_A , '' , example['content'] ).encode('utf-8' ) ).hexdigest()} def a_ ( _A ) -> Optional[Any]: """simple docstring""" snake_case__ = [len(_A ) for line in example['content'].splitlines()] return {"line_mean": np.mean(_A ), "line_max": max(_A )} def a_ ( _A ) -> str: """simple docstring""" snake_case__ = np.mean([c.isalnum() for c in example['content']] ) return {"alpha_frac": alpha_frac} def a_ ( _A , _A ) -> Union[str, Any]: """simple docstring""" if example["hash"] in uniques: uniques.remove(example['hash'] ) return True else: return False def a_ ( _A , _A=5 ) -> Tuple: """simple docstring""" snake_case__ = ['auto-generated', 'autogenerated', 'automatically generated'] snake_case__ = example['content'].splitlines() for _, line in zip(range(_A ) , _A ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def a_ ( _A , _A=5 , _A=0.05 ) -> Dict: """simple docstring""" snake_case__ = ['unit tests', 'test file', 'configuration file'] snake_case__ = example['content'].splitlines() snake_case__ = 0 snake_case__ = 0 # first test for _, line in zip(range(_A ) , _A ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test snake_case__ = example['content'].count('\n' ) snake_case__ = int(coeff * nlines ) for line in lines: count_config += line.lower().count('config' ) count_test += line.lower().count('test' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def a_ ( _A ) -> List[str]: """simple docstring""" snake_case__ = ['def ', 'class ', 'for ', 'while '] snake_case__ = example['content'].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def a_ ( _A , _A=4 ) -> Tuple: """simple docstring""" snake_case__ = example['content'].splitlines() snake_case__ = 0 for line in lines: counter += line.lower().count('=' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def a_ ( _A ) -> List[Any]: """simple docstring""" snake_case__ = tokenizer(example['content'] , truncation=_A )['input_ids'] snake_case__ = len(example['content'] ) / len(_A ) return {"ratio": ratio} def a_ ( _A ) -> List[str]: """simple docstring""" snake_case__ = {} results.update(get_hash(_A ) ) results.update(line_stats(_A ) ) results.update(alpha_stats(_A ) ) results.update(char_token_ratio(_A ) ) results.update(is_autogenerated(_A ) ) results.update(is_config_or_test(_A ) ) results.update(has_no_keywords(_A ) ) results.update(has_few_assignments(_A ) ) return results def a_ ( _A , _A , _A ) -> Optional[int]: """simple docstring""" if not check_uniques(_A , _A ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: 
return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def a_ ( _A ) -> Optional[Any]: """simple docstring""" with open(_A , 'rb' ) as f_in: with gzip.open(str(_A ) + '.gz' , 'wb' , compresslevel=6 ) as f_out: shutil.copyfileobj(_A , _A ) os.unlink(_A ) # Settings __UpperCamelCase : Union[str, Any] = HfArgumentParser(PreprocessingArguments) __UpperCamelCase : Dict = parser.parse_args() if args.num_workers is None: __UpperCamelCase : Any = multiprocessing.cpu_count() __UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset __UpperCamelCase : Any = time.time() __UpperCamelCase : Union[str, Any] = load_dataset(args.dataset_name, split="""train""") print(f'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing __UpperCamelCase : Tuple = time.time() __UpperCamelCase : List[str] = ds.map(preprocess, num_proc=args.num_workers) print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes __UpperCamelCase : int = set(ds.unique("""hash""")) __UpperCamelCase : str = len(uniques) / len(ds) print(f'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics __UpperCamelCase : Optional[Any] = time.time() __UpperCamelCase : List[str] = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args}) print(f'''Time to filter dataset: {time.time()-t_start:.2f}''') print(f'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: __UpperCamelCase : Union[str, Any] = time.time() __UpperCamelCase , __UpperCamelCase : List[Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(f'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file __UpperCamelCase : Optional[Any] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / """duplicate_clusters.json""", """w""") as f: json.dump(duplicate_clusters, f) __UpperCamelCase : List[str] = output_dir / """data""" data_dir.mkdir(exist_ok=True) __UpperCamelCase : Optional[int] = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): __UpperCamelCase : List[Any] = str(data_dir / f'''file-{file_number+1:012}.json''') __UpperCamelCase : List[str] = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["pixel_values"] def __init__( self: List[Any] , UpperCamelCase: bool = True , UpperCamelCase: Optional[Dict[str, int]] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , **UpperCamelCase: Optional[int] , ) -> None: super().__init__(**UpperCamelCase ) snake_case__ = size if size is not None else {'shortest_edge': 2_56} snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_resize snake_case__ = size snake_case__ = resample snake_case__ = do_center_crop snake_case__ = crop_size snake_case__ = do_rescale snake_case__ = rescale_factor snake_case__ = do_normalize snake_case__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) snake_case__ = get_resize_output_image_size(UpperCamelCase , size=size['shortest_edge'] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[Any] , ) -> np.ndarray: snake_case__ = get_size_dict(UpperCamelCase ) return center_crop(UpperCamelCase , size=(size['height'], size['width']) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: float , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Dict ) -> np.ndarray: return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ) -> np.ndarray: return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: Any , UpperCamelCase: ImageInput , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[float] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Any , ) -> Optional[Any]: snake_case__ = do_resize if do_resize is not None else self.do_resize snake_case__ = size if size is not None else self.size snake_case__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) snake_case__ = resample if resample is not None else self.resample snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case__ = crop_size if crop_size is not None else self.crop_size snake_case__ = get_size_dict(UpperCamelCase ) snake_case__ = do_rescale if do_rescale is not None else self.do_rescale snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case__ = do_normalize if do_normalize is not None else self.do_normalize snake_case__ = image_mean if image_mean is not None else self.image_mean snake_case__ = image_std if image_std is not None else self.image_std snake_case__ = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case__ = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: snake_case__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: snake_case__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: snake_case__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: snake_case__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] snake_case__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] snake_case__ = {'pixel_values': images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
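# Hedged usage sketch for the image processor above, via its in-file class
# name. Two assumptions: the obfuscated `snake_case__ =` lines in __init__
# stand for the usual `self.<attr> = <arg>` assignments, and the defaults
# (shortest edge 256, center crop 224x224, 1/255 rescale, ImageNet
# normalization) are active.
if __name__ == "__main__":
    import numpy as np

    processor = __SCREAMING_SNAKE_CASE()
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = processor(image, return_tensors='np')
    print(batch['pixel_values'].shape)  # (1, 3, 224, 224)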
from ... import PretrainedConfig __UpperCamelCase : List[Any] = { """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP _UpperCAmelCase = "nezha" def __init__( self: Tuple , UpperCamelCase: Optional[Any]=2_11_28 , UpperCamelCase: Tuple=7_68 , UpperCamelCase: List[str]=12 , UpperCamelCase: Tuple=12 , UpperCamelCase: Optional[int]=30_72 , UpperCamelCase: int="gelu" , UpperCamelCase: Any=0.1 , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Optional[int]=5_12 , UpperCamelCase: Optional[Any]=64 , UpperCamelCase: Tuple=2 , UpperCamelCase: Optional[Any]=0.02 , UpperCamelCase: str=1e-12 , UpperCamelCase: Any=0.1 , UpperCamelCase: Optional[Any]=0 , UpperCamelCase: Tuple=2 , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: Any=True , **UpperCamelCase: Optional[int] , ) -> Union[str, Any]: super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = hidden_act snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = max_relative_position snake_case__ = type_vocab_size snake_case__ = initializer_range snake_case__ = layer_norm_eps snake_case__ = classifier_dropout snake_case__ = use_cache
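# Hedged usage sketch for the Nezha configuration above, via its in-file
# class name; it likewise assumes the obfuscated `snake_case__ =` lines stand
# for `self.<attr> = <arg>` assignments. The printed values are the defaults
# from the __init__ signature.
if __name__ == "__main__":
    config = __SCREAMING_SNAKE_CASE(hidden_size=768, num_attention_heads=12)
    print(config.vocab_size)             # 21128
    print(config.max_relative_position)  # 64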
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
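# Doctest-style check for binary_insertion_sort above: binary search finds
# each insertion point in O(log i) comparisons, while the element shifting
# keeps the overall worst case at O(n^2) moves.
# >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
# [1, 2, 3, 4, 5, 6]
# >>> binary_insertion_sort([])
# []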
class __SCREAMING_SNAKE_CASE( a_ ): pass class __SCREAMING_SNAKE_CASE( a_ ): pass class __SCREAMING_SNAKE_CASE: def __init__( self: List[str] ) -> Union[str, Any]: snake_case__ = [ [], [], [], ] def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: int ) -> None: try: if len(self.queues[priority] ) >= 1_00: raise OverflowError('Maximum queue size is 100' ) self.queues[priority].append(UpperCamelCase ) except IndexError: raise ValueError('Valid priorities are 0, 1, and 2' ) def lowerCAmelCase_ ( self: List[Any] ) -> int: for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError('All queues are empty' ) def __str__( self: Union[str, Any] ) -> str: return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) ) class __SCREAMING_SNAKE_CASE: def __init__( self: Union[str, Any] ) -> Any: snake_case__ = [] def lowerCAmelCase_ ( self: str , UpperCamelCase: int ) -> None: if len(self.queue ) == 1_00: raise OverFlowError('Maximum queue size is 100' ) self.queue.append(UpperCamelCase ) def lowerCAmelCase_ ( self: int ) -> int: if not self.queue: raise UnderFlowError('The queue is empty' ) else: snake_case__ = min(self.queue ) self.queue.remove(UpperCamelCase ) return data def __str__( self: Optional[Any] ) -> str: return str(self.queue ) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(_A ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(_A ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def a_ ( ) -> List[Any]: """simple docstring""" snake_case__ = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(_A ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(_A ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
import os import sys import unittest __UpperCamelCase : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __UpperCamelCase : Tuple = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") __UpperCamelCase : Any = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class __SCREAMING_SNAKE_CASE( unittest.TestCase ): def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case__ = get_test_to_tester_mapping(UpperCamelCase ) snake_case__ = get_test_to_tester_mapping(UpperCamelCase ) snake_case__ = {'BertModelTest': 'BertModelTester'} snake_case__ = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]: snake_case__ = get_model_to_test_mapping(UpperCamelCase ) snake_case__ = get_model_to_test_mapping(UpperCamelCase ) snake_case__ = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } snake_case__ = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: snake_case__ = get_model_to_tester_mapping(UpperCamelCase ) snake_case__ = get_model_to_tester_mapping(UpperCamelCase ) snake_case__ = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } snake_case__ = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE( a_ ): _UpperCAmelCase = ["image_processor", "tokenizer"] _UpperCAmelCase = "LayoutLMv2ImageProcessor" _UpperCAmelCase = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Union[str, Any] ) -> int: if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase , ) snake_case__ = kwargs.pop('feature_extractor' ) snake_case__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCamelCase: Union[List[List[int]], List[List[List[int]]]] = None , UpperCamelCase: Optional[Union[List[int], List[List[int]]]] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[bool, str, PaddingStrategy] = False , UpperCamelCase: Union[bool, str, TruncationStrategy] = None , UpperCamelCase: Optional[int] = None , UpperCamelCase: int = 0 , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[str, TensorType]] = None , **UpperCamelCase: Any , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor snake_case__ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase , UpperCamelCase ): snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension) snake_case__ = features['words'] snake_case__ = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) # add pixel values snake_case__ = features.pop('pixel_values' ) if return_overflowing_tokens is True: snake_case__ = self.get_overflowing_images(UpperCamelCase , encoded_inputs['overflow_to_sample_mapping'] ) snake_case__ = images return encoded_inputs def lowerCAmelCase_ ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Any ) -> Tuple: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image snake_case__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' ) return images_with_overflow def lowerCAmelCase_ ( self: Dict , *UpperCamelCase: Dict , **UpperCamelCase: Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCAmelCase_ ( self: List[Any] , *UpperCamelCase: Optional[Any] , **UpperCamelCase: int ) -> Optional[Any]: return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCAmelCase_ ( self: str ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCAmelCase_ ( self: Any ) -> List[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase , ) return self.image_processor
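# Hedged usage sketch for the LayoutXLM processor above. The checkpoint name
# is an assumption (it does not appear in this file), and apply_ocr defaults
# to True, so pytesseract must be installed for the image processor's OCR.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base')
    document = Image.new('RGB', (1000, 1000), color='white')
    encoding = processor(document, return_tensors='pt')
    print(sorted(encoding.keys()))  # attention_mask, bbox, image, input_ids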
from __future__ import annotations __UpperCamelCase : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] __UpperCamelCase : List[Any] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def a_ ( _A ) -> list[float]: """simple docstring""" snake_case__ = [] snake_case__ = len(_A ) for i in range(_A ): snake_case__ = -1 for j in range(i + 1 , _A ): if arr[i] < arr[j]: snake_case__ = arr[j] break result.append(_A ) return result def a_ ( _A ) -> list[float]: """simple docstring""" snake_case__ = [] for i, outer in enumerate(_A ): snake_case__ = -1 for inner in arr[i + 1 :]: if outer < inner: snake_case__ = inner break result.append(_A ) return result def a_ ( _A ) -> list[float]: """simple docstring""" snake_case__ = len(_A ) snake_case__ = [] snake_case__ = [-1] * arr_size for index in reversed(range(_A ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: snake_case__ = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) __UpperCamelCase : Union[str, Any] = ( """from __main__ import arr, next_greatest_element_slow, """ """next_greatest_element_fast, next_greatest_element""" ) print( """next_greatest_element_slow():""", timeit("""next_greatest_element_slow(arr)""", setup=setup), ) print( """next_greatest_element_fast():""", timeit("""next_greatest_element_fast(arr)""", setup=setup), ) print( """ next_greatest_element():""", timeit("""next_greatest_element(arr)""", setup=setup), )
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f'''{solution() = }''')
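# Closed-form cross-check for the brute-force sum above: by
# inclusion-exclusion, the sum of multiples of 3 or 5 below n equals
# S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) / 2 with m = (n - 1) // k.
def _closed_form(n: int = 1000) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)


if __name__ == "__main__":
    assert _closed_form(1000) == solution() == 233168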
import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap __UpperCamelCase : str = """Usage of script: script_name <size_of_canvas:int>""" __UpperCamelCase : Tuple = [0] * 100 + [1] * 10 random.shuffle(choice) def a_ ( _A ) -> list[list[bool]]: """simple docstring""" snake_case__ = [[False for i in range(_A )] for j in range(_A )] return canvas def a_ ( _A ) -> None: """simple docstring""" for i, row in enumerate(_A ): for j, _ in enumerate(_A ): snake_case__ = bool(random.getrandbits(1 ) ) def a_ ( _A ) -> list[list[bool]]: """simple docstring""" snake_case__ = np.array(_A ) snake_case__ = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(_A ): for c, pt in enumerate(_A ): snake_case__ = __judge_point( _A , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) snake_case__ = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. snake_case__ = current_canvas.tolist() return return_canvas def a_ ( _A , _A ) -> bool: """simple docstring""" snake_case__ = 0 snake_case__ = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. snake_case__ = pt if pt: if alive < 2: snake_case__ = False elif alive == 2 or alive == 3: snake_case__ = True elif alive > 3: snake_case__ = False else: if alive == 3: snake_case__ = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) __UpperCamelCase : Tuple = int(sys.argv[1]) # main working structure of this module. __UpperCamelCase : str = create_canvas(canvas_size) seed(c) __UpperCamelCase , __UpperCamelCase : List[Any] = plt.subplots() fig.show() __UpperCamelCase : int = ListedColormap(["""w""", """k"""]) try: while True: __UpperCamelCase : Any = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
import os


def solution() -> str:
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp __UpperCamelCase : Optional[Any] = 5 __UpperCamelCase : List[str] = 10 @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ): _UpperCAmelCase = SpeechaTextTokenizer _UpperCAmelCase = False _UpperCAmelCase = True def lowerCAmelCase_ ( self: Tuple ) -> str: super().setUp() snake_case__ = sp.SentencePieceProcessor() spm_model.Load(UpperCamelCase ) snake_case__ = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCamelCase ) )] snake_case__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) snake_case__ = Path(self.tmpdirname ) save_json(UpperCamelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCamelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) snake_case__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self: Any ) -> Tuple: snake_case__ = '<pad>' snake_case__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def lowerCAmelCase_ ( self: Dict ) -> Any: snake_case__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(UpperCamelCase ) , 10_01 ) def lowerCAmelCase_ ( self: Dict ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict: snake_case__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) snake_case__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) snake_case__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( UpperCamelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) snake_case__ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual(UpperCamelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) snake_case__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def lowerCAmelCase_ ( self: Any ) -> Dict: # fmt: off snake_case__ = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class __SCREAMING_SNAKE_CASE( unittest.TestCase ): _UpperCAmelCase = "valhalla/s2t_mustc_multilinguial_medium" _UpperCAmelCase = "C'est trop cool" _UpperCAmelCase = "Esto es genial" @classmethod def lowerCAmelCase_ ( cls: Optional[Any] ) -> Tuple: snake_case__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowerCAmelCase_ ( self: Optional[int] ) -> List[str]: self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]: self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def lowerCAmelCase_ ( self: List[Any] ) -> Dict: self.assertIn(UpperCamelCase , self.tokenizer.all_special_ids ) snake_case__ = [ES_CODE, 4, 16_01, 47, 76_47, 2] snake_case__ = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) snake_case__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase ) def lowerCAmelCase_ ( self: Optional[int] ) -> int: snake_case__ = 'fr' snake_case__ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , UpperCamelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]: snake_case__ = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) snake_case__ = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class __SCREAMING_SNAKE_CASE( ctypes.Structure ): # _fields is a specific attr expected by ctypes _UpperCAmelCase = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def a_ ( ) -> Any: """simple docstring""" if os.name == "nt": snake_case__ = CursorInfo() snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) ) snake_case__ = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) ) elif os.name == "posix": sys.stdout.write('\033[?25l' ) sys.stdout.flush() def a_ ( ) -> Tuple: """simple docstring""" if os.name == "nt": snake_case__ = CursorInfo() snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) ) snake_case__ = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) ) elif os.name == "posix": sys.stdout.write('\033[?25h' ) sys.stdout.flush() @contextmanager def a_ ( ) -> str: """simple docstring""" try: hide_cursor() yield finally: show_cursor()
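# Hedged usage note for the cursor helpers above. An obfuscation artifact:
# all three functions are bound to `a_`, yet the context manager's
# try/finally body calls hide_cursor() and show_cursor(), implying those were
# the original names of the first two helpers. Assuming those names are
# restored, the intended usage pattern is:
#
#     with hidden_cursor_context():   # the @contextmanager-decorated helper
#         run_long_task()             # cursor is hidden while this runs
#     # cursor is guaranteed restored, even if run_long_task() raises
#
# `hidden_cursor_context` and `run_long_task` are hypothetical names used
# only for illustration.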
from __future__ import annotations


def a_(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """simple docstring"""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {'voltage': float(current * resistance)}
    elif current == 0:
        return {'current': voltage / resistance}
    elif resistance == 0:
        return {'resistance': voltage / current}
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
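# Doctest-style worked examples for the Ohm's law helper above: exactly one
# argument must be zero, and the function solves for that quantity from
# V = I * R.
# >>> a_(10, 5, 0)
# {'resistance': 2.0}
# >>> a_(0, 2, 3)
# {'voltage': 6.0}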
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}

NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
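# Usage sketch for the converter above. The file name and directory layout are
# illustrative assumptions; point --input_dir at the folder holding
# tokenizer.model and the per-size weight folders:
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama \
#       --model_size 7B \
#       --output_dir /path/to/llama-7b-hf
#
# Worked example for compute_intermediate_size with dim=4096 (the 7B model):
# int(8 * 4096 / 3) = 10922, and rounding up to a multiple of 256 gives
# 256 * ((10922 + 255) // 256) = 11008, matching INTERMEDIATE_SIZE_MAP["7B"].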
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
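# Minimal usage sketch (assumes the bigscience checkpoints listed above are
# reachable on the Hugging Face Hub):
#
#   from transformers import BloomTokenizerFast
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   print(tokenizer("Hello world")["input_ids"])
#
# To feed pre-split input (is_split_into_words=True), construct the tokenizer
# with add_prefix_space=True; otherwise _batch_encode_plus/_encode_plus above
# raise.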
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
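# Usage sketch: a minimal key-reading loop on top of get_character above.
# Illustrative only, and it must run in a real terminal; note get_character
# returns KEYMAP["undefined"] (an int) for unprintable keys, hence the guards.
#
#   while True:
#       key = get_character()
#       if isinstance(key, str) and ord(key) == KEYMAP["interrupt"]:  # Ctrl-C
#           break
#       if isinstance(key, str) and ord(key) == KEYMAP["up"]:
#           print("up arrow")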
import absl  # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import six  # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring

import datasets


_CITATION = """\
@inproceedings{lin-2004-rouge,
    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
    author = "Lin, Chin-Yew",
    booktitle = "Text Summarization Branches Out",
    month = jul,
    year = "2004",
    address = "Barcelona, Spain",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W04-1013",
    pages = "74--81",
}
"""

_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references
(human-produced) summary or translation.

Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.

This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""

_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references for each prediction. Each
        reference should be a string with tokens separated by spaces.
    rouge_types: A list of rouge types to calculate.
        Valid names:
        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
        `"rougeL"`: Longest common subsequence based scoring.
        `"rougeLSum"`: rougeLsum splits text using `"\n"`.
        See details in https://github.com/huggingface/datasets/issues/617
    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
    use_aggregator: Return aggregates if this is set to True
Returns:
    rouge1: rouge_1 (precision, recall, f1),
    rouge2: rouge_2 (precision, recall, f1),
    rougeL: rouge_l (precision, recall, f1),
    rougeLsum: rouge_lsum (precision, recall, f1)
Examples:

    >>> rouge = datasets.load_metric('rouge')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> results = rouge.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
    >>> print(results["rouge1"])
    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
    >>> print(results["rouge1"].mid.fmeasure)
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
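# Usage sketch mirroring the docstring example above (requires the
# rouge_score and nltk packages to be installed):
#
#   import datasets
#
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(
#       predictions=["hello there", "general kenobi"],
#       references=["hello there", "general kenobi"],
#   )
#   print(results["rouge1"].mid.fmeasure)  # 1.0 for an exact match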
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
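# Usage sketch: the attribute_map above aliases the standard config names,
# and num_layers is derived as num_switch_layers + num_ext_layers:
#
#   config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
#   print(config.num_layers)    # 12
#   print(config.hidden_size)   # 1024, aliased to d_model via attribute_map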