Dataset columns:

  code                     string   lengths 86 – 54.5k
  code_codestyle           int64    0 – 371
  style_context            string   lengths 87 – 49.2k
  style_context_codestyle  int64    0 – 349
  label                    int64    0 – 1
'''simple docstring''' import torch from torch import nn class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a=1 , __a=False ): '''simple docstring''' super().__init__() __a : Optional[int] = n_token __a : List[Any] = d_embed __a : Any = d_proj __a : List[Any] = cutoffs + [n_token] __a : Dict = [0] + self.cutoffs __a : Optional[Any] = div_val __a : Union[str, Any] = self.cutoffs[0] __a : List[str] = len(self.cutoffs ) - 1 __a : int = self.shortlist_size + self.n_clusters if self.n_clusters > 0: __a : Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) __a : Dict = nn.Parameter(torch.zeros(self.n_clusters ) ) __a : List[str] = nn.ModuleList() __a : Tuple = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(__a , __a ) ) ) else: self.out_projs.append(__a ) self.out_layers.append(nn.Linear(__a , __a ) ) else: for i in range(len(self.cutoffs ) ): __a , __a : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a : Union[str, Any] = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(__a , __a ) ) ) self.out_layers.append(nn.Linear(__a , r_idx - l_idx ) ) __a : Tuple = keep_order def __UpperCAmelCase ( self , __a , __a , __a , __a ): '''simple docstring''' if proj is None: __a : str = nn.functional.linear(__a , __a , bias=__a ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: __a : str = nn.functional.linear(__a , proj.t().contiguous() ) __a : Any = nn.functional.linear(__a , __a , bias=__a ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def __UpperCAmelCase ( self , __a , __a=None , __a=False ): '''simple docstring''' if labels is not None: # Shift so that tokens < n predict n __a : Optional[Any] = hidden[..., :-1, :].contiguous() __a : Tuple = labels[..., 1:].contiguous() __a : List[str] = hidden.view(-1 , hidden.size(-1 ) ) __a : List[str] = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('Input and labels should have the same size in the batch dimension.' 
) else: __a : Optional[int] = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: __a : Optional[int] = self._compute_logit(__a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: __a : Optional[int] = labels != -100 __a : Tuple = torch.zeros_like(__a , dtype=hidden.dtype , device=hidden.device ) __a : int = ( -nn.functional.log_softmax(__a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: __a : str = nn.functional.log_softmax(__a , dim=-1 ) else: # construct weights and biases __a , __a : Any = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __a , __a : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a : Tuple = self.out_layers[0].weight[l_idx:r_idx] __a : str = self.out_layers[0].bias[l_idx:r_idx] else: __a : Optional[Any] = self.out_layers[i].weight __a : int = self.out_layers[i].bias if i == 0: __a : Any = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __a : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__a ) biases.append(__a ) __a , __a , __a : int = weights[0], biases[0], self.out_projs[0] __a : List[str] = self._compute_logit(__a , __a , __a , __a ) __a : List[Any] = nn.functional.log_softmax(__a , dim=1 ) if labels is None: __a : int = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: __a : Optional[int] = torch.zeros_like(__a , dtype=hidden.dtype , device=hidden.device ) __a : Tuple = 0 __a : List[Any] = [0] + self.cutoffs for i in range(len(__a ) - 1 ): __a , __a : Any = cutoff_values[i], cutoff_values[i + 1] if labels is not None: __a : List[str] = (labels >= l_idx) & (labels < r_idx) __a : Dict = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue __a : int = labels.index_select(0 , __a ) - l_idx __a : Any = head_logprob.index_select(0 , __a ) __a : List[str] = hidden.index_select(0 , __a ) else: __a : str = hidden if i == 0: if labels is not None: __a : str = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: __a : str = head_logprob[:, : self.cutoffs[0]] else: __a , __a , __a : Tuple = weights[i], biases[i], self.out_projs[i] __a : List[str] = self._compute_logit(__a , __a , __a , __a ) __a : int = nn.functional.log_softmax(__a , dim=1 ) __a : int = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: __a : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: __a : int = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i __a : int = logprob_i if labels is not None: if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order: out.index_copy_(0 , __a , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.n_clusters == 0: __a : Any = self._compute_logit(__a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(__a , dim=-1 ) else: # construct weights and biases __a , __a : Any = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: __a , __a : int = self.cutoff_ends[i], self.cutoff_ends[i + 1] __a : List[Any] = self.out_layers[0].weight[l_idx:r_idx] __a : Tuple = self.out_layers[0].bias[l_idx:r_idx] else: __a : List[Any] = self.out_layers[i].weight __a : List[str] = self.out_layers[i].bias if i == 0: __a : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 ) __a : str = 
torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(__a ) biases.append(__a ) __a , __a , __a : Any = weights[0], biases[0], self.out_projs[0] __a : str = self._compute_logit(__a , __a , __a , __a ) __a : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) ) __a : Any = nn.functional.log_softmax(__a , dim=1 ) __a : Tuple = [0] + self.cutoffs for i in range(len(__a ) - 1 ): __a , __a : Dict = cutoff_values[i], cutoff_values[i + 1] if i == 0: __a : List[str] = head_logprob[:, : self.cutoffs[0]] else: __a , __a , __a : Union[str, Any] = weights[i], biases[i], self.out_projs[i] __a : Any = self._compute_logit(__a , __a , __a , __a ) __a : List[str] = nn.functional.log_softmax(__a , dim=1 ) __a : List[Any] = head_logprob[:, -i] + tail_logprob_i __a : Union[str, Any] = logprob_i return out
294
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
294
1
'''simple docstring'''
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    # Project Euler 173: count the hollow square laminae that can be formed
    # with up to `limit` tiles.
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
294
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
294
1
'''simple docstring'''
import re


def indian_phone_validator(phone: str) -> bool:
    pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pattern, phone):
        # the match must cover the whole string
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
294
'''simple docstring'''
import os


def solution() -> int:
    # Project Euler 22: sum of (alphabetical rank) * (letter score) over sorted names.
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
294
1
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Optional[int] = pytest.mark.integration __lowercase : Dict = {'comet'} __lowercase : Optional[int] = importlib.util.find_spec('fairseq') is not None __lowercase : Tuple = {'code_eval'} __lowercase : Tuple = os.name == 'nt' __lowercase : List[Any] = {'bertscore', 'frugalscore', 'perplexity'} __lowercase : str = importlib.util.find_spec('transformers') is not None def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : int , _SCREAMING_SNAKE_CASE : Any ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): @wraps(_SCREAMING_SNAKE_CASE ) def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , _SCREAMING_SNAKE_CASE ) return wrapper def lowerCamelCase (): __a : Union[str, Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @local class __UpperCamelCase ( parameterized.TestCase ): A_ = {} A_ = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : List[Any] = '[...]' __a : Dict = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) __a : Union[str, Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a ) # check parameters __a : Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__a , metric_module.__name__ ): with self.use_local_metrics(): try: __a : Optional[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Dict = '[...]' __a : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path ) # run doctest with self.use_local_metrics(): __a : List[str] = doctest.testmod(__a , verbose=__a , raise_on_error=__a ) 
self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ): yield else: yield @contextmanager def __UpperCAmelCase ( self ): '''simple docstring''' def load_local_metric(__a , *__a , **__a ): return load_metric(os.path.join('metrics' , __a ) , *__a , **__a ) with patch('datasets.load_metric' ) as mock_load_metric: __a : int = load_local_metric yield @classmethod def __UpperCAmelCase ( cls , __a ): '''simple docstring''' def wrapper(__a ): __a : Union[str, Any] = contextmanager(__a ) __a : int = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a ): '''simple docstring''' assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: __a : List[str] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): import torch def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , *_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : Dict ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: __a : Dict = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ): class __UpperCamelCase : def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' assert len(__a ) == 2 __a : Optional[int] = [0.19, 0.92] return scores, sum(__a ) / len(__a ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: __a : Optional[Any] = None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: __a : Optional[int] = load_from_checkpoint yield def lowerCamelCase (): __a : int = load_metric(os.path.join('metrics' , 'seqeval' ) ) __a : Tuple = 'ERROR' __a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ): metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
294
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
294
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowercase : Optional[int] = 25_60_47 __lowercase : Tuple = 25_61_45 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = NllbTokenizer A_ = NllbTokenizerFast A_ = True A_ = True A_ = {} def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a : str = NllbTokenizer(__a , keep_accents=__a ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = NllbTokenizer(__a , keep_accents=__a ) __a : List[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __a : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __a : List[Any] = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __a : Tuple = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __a : str = self.tokenizer_class.from_pretrained(__a , **__a ) __a : List[str] = tempfile.mkdtemp() __a : Dict = tokenizer_r.save_pretrained(__a ) __a : List[Any] = tokenizer_p.save_pretrained(__a ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) __a : List[str] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way __a : str = tokenizer_r.from_pretrained(__a ) __a : Dict = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) # Save tokenizer rust, 
legacy_format=True __a : Optional[int] = tempfile.mkdtemp() __a : Union[str, Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a ) __a : List[str] = tokenizer_p.save_pretrained(__a ) # Checks it save with the same files self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way __a : Tuple = tokenizer_r.from_pretrained(__a ) __a : str = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) # Save tokenizer rust, legacy_format=False __a : Tuple = tempfile.mkdtemp() __a : Union[str, Any] = tokenizer_r.save_pretrained(__a , legacy_format=__a ) __a : Union[str, Any] = tokenizer_p.save_pretrained(__a ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __a : Tuple = tokenizer_r.from_pretrained(__a ) __a : Dict = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) @require_torch def __UpperCAmelCase ( self ): '''simple docstring''' if not self.test_seqaseq: return __a : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. __a : Dict = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] __a : Dict = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: __a : Optional[Any] = tokenizer.prepare_seqaseq_batch( src_texts=__a , tgt_texts=__a , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified __a : Tuple = tokenizer.prepare_seqaseq_batch( __a , tgt_texts=__a , max_length=3 , return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) __a : Any = tokenizer.prepare_seqaseq_batch( src_texts=__a , max_length=3 , max_target_length=10 , return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('decoder_input_ids' , __a ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' 
) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : Union[str, Any] = [AddedToken('<special>' , lstrip=__a )] __a : Dict = self.rust_tokenizer_class.from_pretrained( __a , additional_special_tokens=__a , **__a ) __a : Dict = tokenizer_r.encode('Hey this is a <special> token' ) __a : Optional[Any] = tokenizer_r.encode('<special>' , add_special_tokens=__a )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: __a : int = self.rust_tokenizer_class.from_pretrained( __a , additional_special_tokens=__a , **__a , ) __a : List[str] = self.tokenizer_class.from_pretrained( __a , additional_special_tokens=__a , **__a ) __a : List[Any] = tokenizer_p.encode('Hey this is a <special> token' ) __a : Tuple = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(__a , __a ) self.assertEqual(__a , __a ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): A_ = "facebook/nllb-200-distilled-600M" A_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] A_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] A_ = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' ) __a : List[str] = 1 return cls def __UpperCAmelCase ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertIn(__a , self.tokenizer.all_special_ids ) # fmt: off __a : List[Any] = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on __a : Dict = self.tokenizer.decode(__a , skip_special_tokens=__a ) __a : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a ) self.assertEqual(__a , __a ) self.assertNotIn(self.tokenizer.eos_token , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , __a ) __a : int = 10 __a : List[Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0] self.assertEqual(ids[-1] , 2 ) 
self.assertEqual(ids[0] , __a ) self.assertEqual(len(__a ) , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = tempfile.mkdtemp() __a : str = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__a ) __a : Dict = NllbTokenizer.from_pretrained(__a ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a ) @require_torch def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) __a : str = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(__a , __a ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) __a : int = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __a ) self.assertEqual(__a , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors='pt' ) __a : List[Any] = self.tokenizer( text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors='pt' ) __a : List[Any] = targets['input_ids'] __a : Dict = shift_tokens_right( __a , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(__a ) , { # A, test, EOS, en_XX 'input_ids': [[25_6047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_6057, } , ) @require_torch def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = True __a : Any = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) __a : List[str] = False __a : Any = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
294
'''simple docstring'''


def is_even(number: int) -> bool:
    # a number is even iff its lowest bit is 0
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
294
1
'''simple docstring'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
294
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
294
1
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    # remove pegasus newline char (assign the result so the substitution actually applies)
    x = re.sub("<n>", "", x)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
294
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) 
self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
294
1
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def __UpperCAmelCase ( self , __a=0 ): '''simple docstring''' __a : Optional[Any] = np.random.RandomState(__a ) __a : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = self.get_dummy_inputs() __a : Any = pipe(**__a ).images __a : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : int = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __a : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a ) pipe.set_progress_bar_config(disable=__a ) __a : Any = self.get_dummy_inputs() __a : List[Any] = pipe(**__a ).images __a : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : str = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __a : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __a : int = self.get_dummy_inputs() __a : int = pipe(**__a ).images __a : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : Optional[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __a : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __a : List[str] = self.get_dummy_inputs() __a : Any = pipe(**__a ).images __a : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : int = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __a : List[str] = 
EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __a : Dict = self.get_dummy_inputs() __a : Union[str, Any] = pipe(**__a ).images __a : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : List[Any] = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) __a : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) __a : int = self.get_dummy_inputs() __a : int = pipe(**__a ).images __a : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __a : List[Any] = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=__a ) __a : List[str] = self.get_dummy_inputs() __a : str = 3 * [inputs['prompt']] # forward __a : Dict = pipe(**__a ) __a : Tuple = output.images[0, -3:, -3:, -1] __a : Any = self.get_dummy_inputs() __a : int = 3 * [inputs.pop('prompt' )] __a : Any = pipe.tokenizer( __a , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=__a , return_tensors='np' , ) __a : Optional[Any] = text_inputs['input_ids'] __a : Tuple = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] __a : List[Any] = prompt_embeds # forward __a : Tuple = pipe(**__a ) __a : Union[str, Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=__a ) __a : Optional[int] = self.get_dummy_inputs() __a : List[str] = 3 * ['this is a negative prompt'] __a : int = negative_prompt __a : Dict = 3 * [inputs['prompt']] # forward __a : Dict = pipe(**__a ) __a : int = output.images[0, -3:, -3:, -1] __a : Optional[Any] = self.get_dummy_inputs() __a : Dict = 3 * [inputs.pop('prompt' )] __a : List[Any] = [] for p in [prompt, negative_prompt]: __a : List[Any] = pipe.tokenizer( __a , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=__a , return_tensors='np' , ) __a : str = text_inputs['input_ids'] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) __a , __a : Any = embeds # forward __a : Any = pipe(**__a ) __a : int = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def __UpperCAmelCase ( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ort.SessionOptions() __a : Tuple = False return options def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = 
OnnxStableDiffusionPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = 'A painting of a squirrel eating a burger' np.random.seed(0 ) __a : Dict = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' ) __a : List[Any] = output.images __a : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __a : Optional[Any] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = DDIMScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) __a : str = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=__a ) __a : Tuple = 'open neural network exchange' __a : List[str] = np.random.RandomState(0 ) __a : List[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='np' ) __a : List[Any] = output.images __a : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __a : Optional[Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) __a : int = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=__a ) __a : List[Any] = 'open neural network exchange' __a : Any = np.random.RandomState(0 ) __a : List[Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='np' ) __a : Optional[int] = output.images __a : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __a : Union[str, Any] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 0 def test_callback_fn(__a , __a , __a ) -> None: __a : List[str] = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) __a : Optional[int] = latents[0, -3:, -3:, -1] __a : Optional[Any] = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) __a : Tuple = latents[0, -3:, -3:, -1] __a : List[str] = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 __a : Any = False __a : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=__a , 
feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = 'Andromeda galaxy in a bottle' __a : List[Any] = np.random.RandomState(0 ) pipe( prompt=__a , num_inference_steps=5 , guidance_scale=7.5 , generator=__a , callback=__a , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = OnnxStableDiffusionPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(__a , __a ) assert pipe.safety_checker is None __a : Optional[int] = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__a ) __a : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(__a ) # sanity check that the pipeline still works assert pipe.safety_checker is None __a : str = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None
294
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : int = parent __a : Union[str, Any] = batch_size __a : Optional[int] = seq_length __a : List[str] = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : List[str] = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : str = num_attention_heads __a : Optional[int] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Dict = type_vocab_size __a : Any = type_sequence_label_size __a : Dict = initializer_range __a : Optional[Any] = num_labels __a : Optional[Any] = num_choices __a : Union[str, Any] = relative_attention __a : List[str] = position_biased_input __a : List[Any] = pos_att_type __a : Tuple = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : Any = None if self.use_token_type_ids: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : int = None __a : Dict = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def 
__UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0] __a : str = model(__a , token_type_ids=__a )[0] __a : Optional[int] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : int = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[Any] = self.num_labels __a : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[str] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __a : str = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : int = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = config_and_inputs __a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, 
"token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = DebertaVaModelTester(self ) __a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : str = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @unittest.skip(reason='Model not available yet' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __a : int = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __a : str = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
294
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __a : Tuple = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) ) self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __a : Any = get_activation('gelu' ) __a : List[str] = get_activation('gelu_10' ) __a : Tuple = torch_builtin(__a ) __a : Any = geluaa(__a ) __a : Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(__a ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def __UpperCAmelCase ( self ): '''simple docstring''' get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(__a ): get_activation('bogus' ) with self.assertRaises(__a ): get_activation(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = get_activation('gelu' ) __a : List[str] = 1 __a : str = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__a ): __a : List[str] = acta.a
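The `gelu_10` activation exercised above behaves like a GELU whose output saturates at 10, which is what the `clipped_mask` comparison in the second test verifies. A minimal sketch of that behaviour, assuming simple clamping (the library's actual implementation may differ, e.g. in how it treats the negative side):

import torch
from torch.nn import functional as F

def gelu_clipped(x: torch.Tensor, limit: float = 10.0) -> torch.Tensor:
    # plain GELU, saturated at +/- limit (assumed behaviour of gelu_10)
    return torch.clamp(F.gelu(x), min=-limit, max=limit)

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
print(gelu_clipped(x))  # the +100 input saturates at exactly 10.0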
294
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ): return False return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ): __a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[Any] = model __a : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Union[str, Any] = model.module if not keep_fpaa_wrapper: __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' ) __a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ): __a : Any = forward.__wrapped__ if forward == original_forward: break __a : str = forward if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ): convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[str] = model __a : Optional[int] = compiled_model return model def lowerCamelCase (): PartialState().wait_for_everyone() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): if PartialState().distributed_type == DistributedType.TPU: xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @contextmanager def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ): for key, value in kwargs.items(): __a : Optional[int] = str(_SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ): return obj.__qualname__ if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): return obj.__name__ return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): for key, value in source.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} ) merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a : Tuple = value return destination def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ): if port is None: __a : List[str] = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
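The recursive dict-merge helper defined above deep-merges `source` into `destination`: nested dicts are descended into, all other values are copied over, and unrelated keys in `destination` survive. A standalone sketch of the same semantics (reimplemented here for illustration, not imported from the file):

def merge_dicts(source: dict, destination: dict) -> dict:
    # recurse into nested dicts, copy everything else over
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

print(merge_dicts({"opt": {"lr": 1e-3}}, {"opt": {"betas": (0.9, 0.999)}, "seed": 42}))
# -> {'opt': {'betas': (0.9, 0.999), 'lr': 0.001}, 'seed': 42}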
294
1
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness __lowercase : Optional[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' __lowercase : List[str] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' __lowercase : List[Any] = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout: maximum time (in seconds) each candidate program is allowed to run (Default: 3.0).\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' __lowercase : int = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' __lowercase : Union[str, Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def __UpperCAmelCase ( self , __a , __a , __a=[1, 10, 100] , __a=4 , __a=3.0 ): '''simple docstring''' if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.'
) with ThreadPoolExecutor(max_workers=__a ) as executor: __a : Tuple = [] __a : Optional[int] = Counter() __a : Optional[int] = 0 __a : Any = defaultdict(__a ) for task_id, (candidates, test_case) in enumerate(zip(__a , __a ) ): for candidate in candidates: __a : Optional[int] = candidate + '\n' + test_case __a : Any = (test_program, timeout, task_id, completion_id[task_id]) __a : Any = executor.submit(__a , *__a ) futures.append(__a ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__a ): __a : Optional[Any] = future.result() results[result["task_id"]].append((result['completion_id'], result) ) __a , __a : List[str] = [], [] for result in results.values(): result.sort() __a : Optional[Any] = [r[1]['passed'] for r in result] total.append(len(__a ) ) correct.append(sum(__a ) ) __a : Tuple = np.array(__a ) __a : Optional[Any] = np.array(__a ) __a : List[Any] = k __a : Optional[Any] = {f"""pass@{k}""": estimate_pass_at_k(__a , __a , __a ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple ): def estimator(_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[int] = itertools.repeat(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ) else: assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) __a : Any = iter(_SCREAMING_SNAKE_CASE ) return np.array([estimator(int(_SCREAMING_SNAKE_CASE ) , int(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) for n, c in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] )
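The `estimator` closure above is the unbiased pass@k of Chen et al. (2021): given n samples per task of which c pass, pass@k = 1 - C(n-c, k) / C(n, k), evaluated in the numerically stable product form used in the code. A standalone sketch with a worked check against the docstring example (function name here is illustrative, not part of the metric's API):

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # unbiased estimator 1 - C(n-c, k) / C(n, k), expanded as a product
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# 2 candidates for one task, 1 of them passing:
print(pass_at_k(n=2, c=1, k=1))  # 0.5, matching {'pass@1': 0.5} above
print(pass_at_k(n=2, c=1, k=2))  # 1.0, matching {'pass@2': 1.0} above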
294
'''simple docstring''' from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
294
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): __a : Tuple = SwinvaConfig() __a : Union[str, Any] = swinva_name.split('_' ) __a : Tuple = name_split[1] if "to" in name_split[3]: __a : List[str] = int(name_split[3][-3:] ) else: __a : Union[str, Any] = int(name_split[3] ) if "to" in name_split[2]: __a : Optional[int] = int(name_split[2][-2:] ) else: __a : int = int(name_split[2][6:] ) if model_size == "tiny": __a : Optional[Any] = 96 __a : Union[str, Any] = (2, 2, 6, 2) __a : Union[str, Any] = (3, 6, 12, 24) elif model_size == "small": __a : str = 96 __a : Dict = (2, 2, 18, 2) __a : Union[str, Any] = (3, 6, 12, 24) elif model_size == "base": __a : Any = 128 __a : Any = (2, 2, 18, 2) __a : List[str] = (4, 8, 16, 32) else: __a : Optional[Any] = 192 __a : str = (2, 2, 18, 2) __a : Dict = (6, 12, 24, 48) if "to" in swinva_name: __a : Any = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __a : Tuple = 21_841 __a : Optional[int] = 'huggingface/label-files' __a : List[Any] = 'imagenet-22k-id2label.json' __a : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Dict = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : str = idalabel __a : List[str] = {v: k for k, v in idalabel.items()} else: __a : List[str] = 1_000 __a : Tuple = 'huggingface/label-files' __a : Union[str, Any] = 'imagenet-1k-id2label.json' __a : Optional[int] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : Optional[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : Union[str, Any] = idalabel __a : List[str] = {v: k for k, v in idalabel.items()} __a : Union[str, Any] = img_size __a : str = num_classes __a : List[Any] = embed_dim __a : str = depths __a : Optional[int] = num_heads __a : Optional[Any] = window_size return config def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): if "patch_embed.proj" in name: __a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __a : Any = 'encoder.' 
+ name if "attn.proj" in name: __a : str = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __a : Optional[int] = name.replace('attn' , 'attention.self' ) if "norm1" in name: __a : Optional[int] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __a : int = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __a : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __a : Dict = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: __a : Union[str, Any] = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: __a : str = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: __a : Dict = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: __a : Dict = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": __a : str = 'layernorm.weight' if name == "norm.bias": __a : List[Any] = 'layernorm.bias' if "head" in name: __a : str = name.replace('head' , 'classifier' ) else: __a : Optional[Any] = 'swinv2.' + name return name def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ): for key in orig_state_dict.copy().keys(): __a : List[Any] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "mask" in key: continue elif "qkv" in key: __a : Any = key.split('.' ) __a : Union[str, Any] = int(key_split[1] ) __a : int = int(key_split[3] ) __a : Dict = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __a : Optional[int] = val[:dim, :] __a : Tuple = val[dim : dim * 2, :] __a : str = val[-dim:, :] else: __a : int = val[:dim] __a : List[str] = val[ dim : dim * 2 ] __a : Any = val[-dim:] else: __a : str = val return orig_state_dict def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ): __a : Optional[int] = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() __a : Optional[int] = get_swinva_config(_SCREAMING_SNAKE_CASE ) __a : Tuple = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.eval() __a : Any = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) __a : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Any = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) __a : Optional[int] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) __a : Dict = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) __a : List[str] = timm_model(inputs['pixel_values'] ) __a : int = model(**_SCREAMING_SNAKE_CASE ).logits assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) model.push_to_hub( repo_path_or_name=Path(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": __lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swinv2_name', default='swinv2_tiny_patch4_window8_256', type=str, help='Name of the Swinv2 timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) __lowercase : Any = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
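The qkv branch of `convert_state_dict` above splits timm's fused attention projection into the separate query/key/value tensors the Hugging Face Swin V2 attention layers expect: rows [0, dim), [dim, 2*dim), and the final dim rows, where dim is the attention's all_head_size. The same slicing on a toy tensor:

import torch

dim = 4                                 # stands in for all_head_size
qkv_weight = torch.randn(3 * dim, dim)  # fused (q|k|v) projection as stored by timm

query_w = qkv_weight[:dim, :]
key_w = qkv_weight[dim : dim * 2, :]
value_w = qkv_weight[-dim:, :]          # identical slices to the converter above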
294
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class __UpperCamelCase : A_ = 42 A_ = None A_ = None def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ): # Validation def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool: if node is None: return True if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(_SCREAMING_SNAKE_CASE ): raise ValueError( 'Each node should be of type TreeNode and its data should be a float.' ) def is_binary_search_tree_recursive_check( _SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , _SCREAMING_SNAKE_CASE ) ) return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) ) if __name__ == "__main__": import doctest doctest.testmod()
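The validator above runs two passes: a structural check (every node is a TreeNode carrying float-convertible data), then a recursive check that threads an open interval down the tree and accepts a node only if left_bound < node.data < right_bound. A minimal standalone sketch of that bound-passing recursion (names are illustrative, not the file's obfuscated ones):

def is_bst(node, low=float("-inf"), high=float("inf")) -> bool:
    # an empty subtree is trivially a BST
    if node is None:
        return True
    # the node must lie strictly inside the bounds inherited from its ancestors
    if not (low < node.data < high):
        return False
    # going left tightens the upper bound, going right tightens the lower bound
    return is_bst(node.left, low, node.data) and is_bst(node.right, node.data, high)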
294
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = KandinskyInpaintPipeline A_ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] A_ = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", "mask_image", ] A_ = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A_ = False @property def __UpperCAmelCase ( self ): '''simple docstring''' return 32 @property def __UpperCAmelCase ( self ): '''simple docstring''' return 32 @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCAmelCase ( self ): '''simple docstring''' return 100 @property def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Union[str, Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __a : List[Any] = MultilingualCLIP(__a ) __a : int = text_encoder.eval() return text_encoder @property def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Optional[Any] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __a : str = UNetaDConditionModel(**__a ) return model @property def __UpperCAmelCase ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : List[Any] = 
VQModel(**self.dummy_movq_kwargs ) return model def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.dummy_text_encoder __a : List[str] = self.dummy_tokenizer __a : Optional[Any] = self.dummy_unet __a : str = self.dummy_movq __a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type='epsilon' , thresholding=__a , ) __a : Optional[int] = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' __a : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a ) __a : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a ) # create init_image __a : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a ) __a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __a : str = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) ) # create mask __a : Tuple = np.ones((64, 64) , dtype=np.floataa ) __a : Any = 0 if str(__a ).startswith('mps' ): __a : str = torch.manual_seed(__a ) else: __a : str = torch.Generator(device=__a ).manual_seed(__a ) __a : List[Any] = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = 'cpu' __a : List[Any] = self.get_dummy_components() __a : str = self.pipeline_class(**__a ) __a : List[str] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __a : Tuple = pipe(**self.get_dummy_inputs(__a ) ) __a : Any = output.images __a : Optional[int] = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __a : int = image[0, -3:, -3:, -1] __a : Tuple = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) __a : Union[str, Any] = np.array( [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCAmelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __a : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa ) __a : List[str] = 0 __a : Dict = 'a hat' __a : Any = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , 
torch_dtype=torch.floataa ) pipe_prior.to(__a ) __a : List[str] = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa ) __a : Optional[Any] = pipeline.to(__a ) pipeline.set_progress_bar_config(disable=__a ) __a : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) __a , __a : Optional[Any] = pipe_prior( __a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __a : Tuple = pipeline( __a , image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) __a : Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__a , __a )
294
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): from transformers.testing_utils import pytest_terminal_summary_main __a : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
294
1
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , ): __a : Dict = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('All input parameters must be positive' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('Relative densities cannot be greater than one' ) else: __a : Optional[int] = 1 - (matter_density + radiation_density + dark_energy) __a : str = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) __a : Union[str, Any] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation __lowercase : Any = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
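In the notation of the arguments above, the function infers the curvature density from closure and evaluates the standard FLRW expansion rate, which is exactly the expression assembled in the body:

$$\Omega_k = 1 - (\Omega_m + \Omega_r + \Omega_\Lambda), \qquad H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda}$$

For the demo values ($H_0 = 68.3$, $\Omega_m = 0.3$, $\Omega_r = 10^{-4}$, $\Omega_\Lambda = 1 - \Omega_m$, $z = 0$) the curvature term $\Omega_k = -10^{-4}$ exactly cancels the radiation term, the bracket sums to 1, and the call returns $H_0 = 68.3$.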
294
'''simple docstring''' import re from filelock import FileLock try: import nltk __lowercase : Optional[Any] = True except (ImportError, ModuleNotFoundError): __lowercase : Dict = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): _SCREAMING_SNAKE_CASE = re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE ) # remove pegasus newline char; re.sub returns a new string, so keep the result assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
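Intended behaviour: drop Pegasus's literal '<n>' newline marker, re-split the text with NLTK's punkt sentence tokenizer, and return one sentence per line (useful for ROUGE-style scoring). Illustrative input/output, assuming the punkt data is available:

# input : "The cat sat. <n>The dog barked."
# output: "The cat sat.\nThe dog barked."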
294
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , __a=None , __a=True , ): '''simple docstring''' __a : Optional[Any] = size if size is not None else {'shortest_edge': 20} __a : List[str] = crop_size if crop_size is not None else {'height': 18, 'width': 18} __a : Optional[Any] = parent __a : Tuple = batch_size __a : str = num_channels __a : Any = image_size __a : str = min_resolution __a : List[Any] = max_resolution __a : Any = do_resize __a : Optional[int] = size __a : Optional[int] = do_center_crop __a : str = crop_size __a : int = do_flip_channel_order def __UpperCAmelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = MobileViTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = MobileViTImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , 'do_resize' ) ) self.assertTrue(hasattr(__a , 'size' ) ) self.assertTrue(hasattr(__a , 'do_center_crop' ) ) self.assertTrue(hasattr(__a , 'center_crop' ) ) self.assertTrue(hasattr(__a , 'do_flip_channel_order' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) __a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __a : List[Any] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __a : int = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched __a : Optional[Any] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
294
'''simple docstring''' import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __lowercase : Any = [0, 25, 50] __lowercase : int = [25, 50, 75] __lowercase : List[str] = fuzz.membership.trimf(X, abca) __lowercase : Any = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __lowercase : List[Any] = np.ones(75) __lowercase : Any = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __lowercase : str = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __lowercase : Optional[Any] = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __lowercase : str = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
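Behind the skfuzzy calls, the basic set operations reduce to elementwise arithmetic on membership vectors, matching the formulas in the comments above. The same results without the library dependency (toy membership grades chosen purely for illustration):

import numpy as np

mu_a = np.array([0.0, 0.5, 1.0, 0.5, 0.0])   # membership grades of set A
mu_b = np.array([0.2, 0.2, 0.2, 0.9, 0.9])   # membership grades of set B

union = np.maximum(mu_a, mu_b)               # max(muA(x), muB(x))
intersection = np.minimum(mu_a, mu_b)        # min(muA(x), muB(x))
complement_a = 1.0 - mu_a                    # 1 - muA(x)
algebraic_sum = mu_a + mu_b - mu_a * mu_b    # muA(x) + muB(x) - muA(x)*muB(x)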
294
1
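The fuzzy-set script above builds its two sets with fuzz.membership.trimf. For readers without skfuzzy installed, a NumPy-only sketch of a triangular membership function, assuming parameters a < b < c; this is illustrative, not skfuzzy's actual implementation.

import numpy as np

def trimf(x, abc):
    # Minimal triangular membership (assumes a < b < c): 0 outside [a, c],
    # rising linearly to 1 at b, falling linearly back to 0 at c.
    a, b, c = abc
    return np.clip(np.minimum((x - a) / (b - a), (c - x) / (c - b)), 0, 1)

x = np.linspace(0, 75, 75)
young = trimf(x, [0, 25, 50])         # peaks at 25
middle_aged = trimf(x, [25, 50, 75])  # peaks at 50
assert 0 <= young.min() and young.max() <= 1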
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor

logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
294
'''simple docstring'''
import sys

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f'{solution() = }')
294
1
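The greedy window above skips ahead whenever a smaller leading digit could only shrink the product. A brute-force cross-check over every window is a simple way to validate that shortcut; prod_of_digits and brute_force_solution are illustrative names.

from functools import reduce

def prod_of_digits(s: str) -> int:
    return reduce(lambda acc, ch: acc * int(ch), s, 1)

def brute_force_solution(n: str, width: int = 13) -> int:
    # Examine every contiguous window of the given width.
    return max(prod_of_digits(n[i : i + width]) for i in range(len(n) - width + 1))

# Sanity check on a tiny input: windows of '12345' of width 2 are 12, 23, 34, 45,
# with digit products 2, 6, 12, 20.
assert brute_force_solution('12345', 2) == 20
# brute_force_solution(N) should agree with the greedy solution() above.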
'''simple docstring'''
import requests

_NEWS_API = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['articles'], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
294
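The fetch helper above hits a live endpoint and needs a real API key. A sketch of exercising its parsing logic offline with the standard library's unittest.mock, assuming the corrected fetch_bbc_news above is importable; the payload shape is an assumption based on the article['title'] access in the source.

from unittest import mock

fake_payload = {'articles': [{'title': 'First headline'}, {'title': 'Second headline'}]}

with mock.patch('requests.get') as mock_get:
    mock_get.return_value.json.return_value = fake_payload
    fetch_bbc_news(bbc_news_api_key='dummy-key')  # prints "1.) First headline" etc.
    mock_get.assert_called_once()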
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = CodeGenTokenizer A_ = CodeGenTokenizerFast A_ = True A_ = {"add_prefix_space": True} A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __a : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) ) __a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : Dict = {'unk_token': '<unk>'} __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__a ) ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = 'lower newer' __a : Tuple = 'lower newer' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __a : str = 'lower newer' __a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) __a : List[str] = tokens + [tokenizer.unk_token] __a : Any = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return __a : List[Any] = self.get_tokenizer() __a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Any = 'lower newer' # Testing tokenization __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) __a : Dict = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens __a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) __a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with special tokens __a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a ) __a : int = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token __a : Any = tokens + [rust_tokenizer.unk_token] __a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' pass def __UpperCAmelCase ( self 
, __a=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input __a : List[Any] = 'This is a simple input' __a : Tuple = ['This is a simple input 1', 'This is a simple input 2'] __a : Tuple = ('This is a simple input', 'This is a pair') __a : str = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __a : str = 'This is a simple input' __a : Any = ['This is a simple input looooooooong', 'This is a simple input'] __a : Optional[int] = ('This is a simple input', 'This is a pair') __a : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __a : int = tokenizer.pad_token_id __a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' ) __a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) __a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' ) __a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = '$$$' __a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) __a : Union[str, Any] = 'This is a simple input' __a : List[Any] = ['This is a 
simple input 1', 'This is a simple input 2'] __a : List[Any] = tokenizer.bos_token_id __a : List[str] = tokenizer(__a ) __a : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __a : Any = tokenizer.decode(out_s.input_ids ) __a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) __a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' __a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b' __a : Optional[int] = tokenizer.encode(__a ) __a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] __a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a ) self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass
294
1
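The tokenizer test above expects ' lower' to come out as '\u0120low' + 'er' given the toy vocab and merges it writes to disk. A minimal sketch of the greedy BPE merge loop that produces those pieces; this is illustrative, not the library's implementation.

def bpe(word, merges):
    # word: string of initial symbols; merges: ranked list of (left, right) pairs.
    ranks = {pair: i for i, pair in enumerate(merges)}
    symbols = list(word)
    while True:
        # Find the lowest-ranked adjacent pair still present, if any.
        pairs = [
            (ranks.get((a, b), float('inf')), i)
            for i, (a, b) in enumerate(zip(symbols, symbols[1:]))
        ]
        best_rank, i = min(pairs, default=(float('inf'), -1))
        if best_rank == float('inf'):
            break
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

merges = [('\u0120', 'l'), ('\u0120l', 'o'), ('\u0120lo', 'w'), ('e', 'r')]
print(bpe('\u0120lower', merges))  # ['\u0120low', 'er']
print(bpe('\u0120newer', merges))  # ['\u0120', 'n', 'e', 'w', 'er']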
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __lowercase : Optional[int] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __lowercase : str = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __lowercase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __lowercase : Optional[Any] = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __lowercase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __lowercase : Union[str, Any] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), 
('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __lowercase : str = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def lowerCamelCase (): __a , __a : int = randrange(len(_SCREAMING_SNAKE_CASE ) ), randrange(len(_SCREAMING_SNAKE_CASE ) ) __a : Dict = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __a , __a : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 100 ): return (generate_random_hand() for _ in range(_SCREAMING_SNAKE_CASE )) @pytest.mark.parametrize('hand, expected' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ): assert PokerHand(_SCREAMING_SNAKE_CASE )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): assert PokerHand(_SCREAMING_SNAKE_CASE )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ): __a : Tuple = PokerHand(_SCREAMING_SNAKE_CASE ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict ): assert PokerHand(_SCREAMING_SNAKE_CASE )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): assert PokerHand(_SCREAMING_SNAKE_CASE )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] ): assert PokerHand(_SCREAMING_SNAKE_CASE ).compare_with(PokerHand(_SCREAMING_SNAKE_CASE ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] ): assert PokerHand(_SCREAMING_SNAKE_CASE ).compare_with(PokerHand(_SCREAMING_SNAKE_CASE ) ) == expected def lowerCamelCase (): __a : List[str] = [PokerHand(_SCREAMING_SNAKE_CASE ) for hand in SORTED_HANDS] __a : Dict = poker_hands.copy() shuffle(_SCREAMING_SNAKE_CASE ) __a : str = chain(sorted(_SCREAMING_SNAKE_CASE ) ) for index, hand in enumerate(_SCREAMING_SNAKE_CASE ): assert hand == poker_hands[index] def lowerCamelCase (): # Test that five high straights are compared correctly. __a : Optional[Any] = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=_SCREAMING_SNAKE_CASE ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase (): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
__a : str = PokerHand('2C 4S AS 3D 5C' ) __a : Union[str, Any] = True __a : Dict = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase (): # Problem number 54 from Project Euler # Testing from poker_hands.txt file __a : Optional[int] = 0 __a : List[Any] = os.path.abspath(os.path.dirname(_SCREAMING_SNAKE_CASE ) ) __a : int = os.path.join(_SCREAMING_SNAKE_CASE , 'poker_hands.txt' ) with open(_SCREAMING_SNAKE_CASE ) as file_hand: for line in file_hand: __a : Tuple = line[:14].strip() __a : Tuple = line[15:].strip() __a , __a : Union[str, Any] = PokerHand(_SCREAMING_SNAKE_CASE ), PokerHand(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = player.compare_with(_SCREAMING_SNAKE_CASE ) if output == "Win": answer += 1 assert answer == 376
294
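The random-hand generator above picks the expected outcome with ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]. Since Python booleans are integers, the two comparisons sum to 0, 1, or 2, indexing the right label.

outcomes = ['Loss', 'Tie', 'Win']
for play, oppo in [(1, 5), (3, 3), (9, 2)]:
    # play < oppo -> 0 + 0 = 0, play == oppo -> 1 + 0 = 1, play > oppo -> 1 + 1 = 2
    print(play, oppo, outcomes[(play >= oppo) + (play > oppo)])
# 1 5 Loss
# 3 3 Tie
# 9 2 Win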
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)

        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
294
1
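With the corrected signature above, a quick call shows the string the function accumulates: one entry per number, each followed by a space.

print(fizz_buzz(1, 15))
# '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '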
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor

logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
294
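The VideoMAE and CLIP feature-extractor files in this section share one deprecation pattern: subclass the replacement class and emit a FutureWarning on construction, so old code keeps working while signalling the rename. The pattern in isolation, with hypothetical class names:

import warnings

class NewThing:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldThing(NewThing):
    # Deprecated alias: behaves exactly like NewThing but warns on construction.
    def __init__(self, *args, **kwargs):
        warnings.warn('OldThing is deprecated; use NewThing instead.', FutureWarning)
        super().__init__(*args, **kwargs)

old = OldThing(scale=2.0)  # emits a FutureWarning, still fully functional
assert old.scale == 2.0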
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {'height': 18, 'width': 18} __a : int = parent __a : Dict = batch_size __a : Optional[int] = num_channels __a : List[Any] = image_size __a : Tuple = min_resolution __a : str = max_resolution __a : str = do_resize __a : Optional[Any] = size __a : str = apply_ocr def __UpperCAmelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = LayoutLMvaImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , 'do_resize' ) ) self.assertTrue(hasattr(__a , 'size' ) ) self.assertTrue(hasattr(__a , 'apply_ocr' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) __a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __a ) self.assertIsInstance(encoding.boxes , __a ) # Test batched __a : Any = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __a : Optional[Any] = 
image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset __a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) __a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' ) __a : Optional[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 
'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 __a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 
726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a ) self.assertListEqual(encoding.boxes , __a ) # with apply_OCR = False __a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a ) __a : List[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
294
1
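The word boxes above are raw pixel coordinates from Tesseract. LayoutLM-family processors go on to normalize such boxes onto a 0-1000 grid relative to the image size; a sketch of that normalization, with an illustrative helper name:

def normalize_box(box, width, height):
    # Scale absolute pixel coordinates (x0, y0, x1, y1) onto a 0-1000 grid.
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]

print(normalize_box([141, 57, 214, 69], width=1024, height=754))  # [137, 75, 208, 91]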
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
294
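_LazyModule above defers the heavy torch-dependent imports until an attribute is actually requested. A plain-Python sketch of the same idea using a module-level __getattr__ (PEP 562), placed in a package's __init__.py; the package and attribute names are hypothetical.

# lazy_pkg/__init__.py: defer importing submodules until first attribute access.
import importlib

_import_structure = {'heavy_module': ['ExpensiveClass']}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Called only when normal module lookup fails (PEP 562).
    if name in _attr_to_module:
        module = importlib.import_module(f'.{_attr_to_module[name]}', __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')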
'''simple docstring'''
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig

ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class ErnieMConfig(PretrainedConfig):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        initializer_range=0.02,
        pad_token_id=1,
        layer_norm_eps=1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
294
1
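The config above declares attribute_map, so legacy attribute names like dropout resolve to classifier_dropout. A minimal sketch of how such aliasing can be wired up; this is illustrative, not the actual PretrainedConfig mechanics.

class AliasedConfig:
    attribute_map = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}

    def __init__(self, classifier_dropout=0.1, num_labels=2):
        self.classifier_dropout = classifier_dropout
        self.num_labels = num_labels

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails: resolve legacy aliases.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

cfg = AliasedConfig()
assert cfg.dropout == cfg.classifier_dropout == 0.1
assert cfg.num_classes == 2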
'''simple docstring'''
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock())


@pytest.mark.parametrize(
    'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='https://huggingface.co/docs/evaluate'):
        func(*args)
294
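The parametrized test above asserts that each metric entry point emits a deprecation warning pointing at the evaluate docs. The pytest.warns pattern in isolation, with hypothetical function names:

import warnings

import pytest

def old_api():
    warnings.warn('old_api is deprecated, see https://example.com/migration', FutureWarning)
    return 42

def test_old_api_warns():
    # pytest.warns fails the test if no matching warning is emitted inside the block.
    with pytest.warns(FutureWarning, match='example.com/migration'):
        assert old_api() == 42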
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() __a : int = module __a : List[Any] = nn.Sequential( nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , ) __a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__a ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' return self.module(__a , *__a , **__a ) + self.adapter(__a ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module A_ = "bigscience/bloom-1b7" # Constant values A_ = 2.109659552692574 A_ = "Hello my name is" A_ = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) A_ = 10 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = AutoTokenizer.from_pretrained(self.model_name ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_abit.config self.assertTrue(hasattr(__a , 'quantization_config' ) ) __a : Union[str, Any] = config.to_dict() __a : Tuple = config.to_diff_dict() __a : Tuple = config.to_json_string() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __a : List[Any] = self.model_fpaa.get_memory_footprint() __a : List[Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __a : Tuple = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__a , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BitsAndBytesConfig() __a : Tuple = True __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ) __a : List[Any] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BitsAndBytesConfig() with self.assertRaises(__a ): __a : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(__a ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__a ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with 
self.assertRaises(__a ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__a ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Optional[int] = self.model_fpaa.to(torch.floataa ) __a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __a : List[Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.half() # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.float() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : Any = 't5-small' __a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __a : int = AutoTokenizer.from_pretrained(cls.model_name ) __a : Union[str, Any] = 'Translate in German: Hello, my dog is cute' def __UpperCAmelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules __a : List[str] = None # test with `t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : Any = model.generate(**__a ) # test with `flan-t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[Any] = model.generate(**__a ) __a : Optional[int] = modules def __UpperCAmelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[str] = model.generate(**__a ) # test with `flan-t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : int = model.generate(**__a ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # model_name __a : List[Any] = 'bigscience/bloom-560m' __a : Union[str, Any] = 't5-small' # Different types of model __a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Sequence classification model __a : Dict = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__a , device_map='auto' ) # CausalLM model 
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Seq2seq model __a : Any = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __a : str = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__a , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 'facebook/opt-350m' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __a : Tuple = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __a : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__a ) ): __a : str = LoRALayer(module.q_proj , rank=16 ) __a : str = LoRALayer(module.k_proj , rank=16 ) __a : Optional[int] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __a : int = model.forward(**__a ) out.logits.norm().backward() for module in model.modules(): if isinstance(__a , __a ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__a , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "gpt2-xl" A_ = 3.3191854854152187
294
1
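The adapter module at the top of the quantization test file above adds a trainable low-rank bypass around a frozen linear layer, which is the core of the LoRA fine-tuning setup exercised in the final test class. A self-contained sketch of that idea; TinyLoRA is a hypothetical stand-in for the file's adapter class.

import torch
import torch.nn as nn

class TinyLoRA(nn.Module):
    # Frozen base linear plus a trainable low-rank (in -> rank -> out) bypass.
    def __init__(self, base: nn.Linear, rank: int):
        super().__init__()
        self.base = base
        self.adapter = nn.Sequential(
            nn.Linear(base.in_features, rank, bias=False),
            nn.Linear(rank, base.out_features, bias=False),
        )
        nn.init.zeros_(self.adapter[1].weight)  # start as an identity-preserving bypass

    def forward(self, x):
        return self.base(x) + self.adapter(x)

base = nn.Linear(64, 64)
for p in base.parameters():
    p.requires_grad = False  # freeze the pretrained weights

layer = TinyLoRA(base, rank=4)
out = layer(torch.randn(2, 64))
out.sum().backward()
# Only the adapter weights receive gradients; the frozen base does not.
assert base.weight.grad is None and layer.adapter[0].weight.grad is not None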
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=30 , __a=400 , __a=True , __a=None , __a=True , __a=[0.5, 0.5, 0.5] , __a=[0.5, 0.5, 0.5] , __a=True , __a=1 / 255 , __a=True , ): '''simple docstring''' __a : Optional[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} __a : str = parent __a : Union[str, Any] = batch_size __a : List[Any] = num_channels __a : Dict = min_resolution __a : Tuple = max_resolution __a : Any = do_resize __a : Optional[Any] = size __a : int = do_normalize __a : List[str] = image_mean __a : Union[str, Any] = image_std __a : Dict = do_rescale __a : List[str] = rescale_factor __a : str = do_pad def __UpperCAmelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCAmelCase ( self , __a , __a=False ): '''simple docstring''' if not batched: __a : Optional[Any] = image_inputs[0] if isinstance(__a , Image.Image ): __a , __a : Any = image.size else: __a , __a : Optional[Any] = image.shape[1], image.shape[2] if w < h: __a : Tuple = int(self.size['shortest_edge'] * h / w ) __a : int = self.size['shortest_edge'] elif w > h: __a : int = self.size['shortest_edge'] __a : List[str] = int(self.size['shortest_edge'] * w / h ) else: __a : Tuple = self.size['shortest_edge'] __a : List[Any] = self.size['shortest_edge'] else: __a : Union[str, Any] = [] for image in image_inputs: __a , __a : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : Tuple = max(__a , key=lambda __a : item[0] )[0] __a : Optional[int] = max(__a , key=lambda __a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = YolosImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = YolosImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , 'image_mean' ) ) self.assertTrue(hasattr(__a , 'image_std' ) ) self.assertTrue(hasattr(__a , 'do_normalize' ) ) self.assertTrue(hasattr(__a , 'do_resize' ) ) self.assertTrue(hasattr(__a , 'size' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , __a ) __a : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__a ) self.assertEqual(image_processor.size , 
{'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __a , __a : int = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : Any = self.image_processor_tester.get_expected_values(__a , batched=__a ) __a : Union[str, Any] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __a : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __a , __a : str = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : Optional[int] = image_processing(__a , return_tensors='pt' ).pixel_values __a , __a : List[str] = self.image_processor_tester.get_expected_values(__a , batched=__a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values __a , __a : Tuple = self.image_processor_tester.get_expected_values(__a , batched=__a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) __a : Optional[Any] = self.image_processing_class(do_resize=__a , do_normalize=__a , do_rescale=__a ) # create random PyTorch tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) 
# Test whether the method "pad" and calling the image processor return the same tensors __a : List[Any] = image_processing_a.pad(__a , return_tensors='pt' ) __a : Optional[Any] = image_processing_a(__a , return_tensors='pt' ) self.assertTrue( torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1E-4 ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: __a : Optional[Any] = json.loads(f.read() ) __a : str = {'image_id': 3_9769, 'annotations': target} # encode them __a : int = YolosImageProcessor.from_pretrained('hustvl/yolos-small' ) __a : List[Any] = image_processing(images=__a , annotations=__a , return_tensors='pt' ) # verify pixel values __a : List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , __a ) __a : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __a , atol=1E-4 ) ) # verify area __a : Any = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __a ) ) # verify boxes __a : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , __a ) __a : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __a , atol=1E-3 ) ) # verify image_id __a : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __a ) ) # verify is_crowd __a : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __a ) ) # verify class_labels __a : str = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __a ) ) # verify orig_size __a : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __a ) ) # verify size __a : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __a ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: __a : Optional[Any] = json.loads(f.read() ) __a : Dict = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target} __a : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them __a : int = YolosImageProcessor(format='coco_panoptic' ) __a : Union[str, Any] = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors='pt' ) # verify pixel values __a : Any = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , __a ) __a : Dict = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __a , atol=1E-4 ) ) # verify area __a : Optional[int] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __a ) ) # verify boxes __a : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , __a ) __a : Union[str, Any] = torch.tensor([0.2625, 0.5437, 
0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __a , atol=1E-3 ) ) # verify image_id __a : Optional[int] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __a ) ) # verify is_crowd __a : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __a ) ) # verify class_labels __a : Dict = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __a ) ) # verify masks __a : Optional[Any] = 82_2873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __a ) # verify orig_size __a : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __a ) ) # verify size __a : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __a ) )
294
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __UpperCamelCase ( lowerCAmelCase_ ): A_ = None A_ = None A_ = None A_ = None class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) __a : Any = project_dim __a : Optional[Any] = pooler_fn __a : int = learn_encoder __a : str = use_attention_mask class __UpperCamelCase ( lowerCAmelCase_ ): A_ = [r"pooler", r"logit_scale"] A_ = [r"position_ids", r"predictions.decoder.bias"] A_ = "roberta" A_ = RobertaSeriesConfig def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Optional[Any] = XLMRobertaModel(__a ) __a : str = nn.Linear(config.hidden_size , config.project_dim ) __a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a ) if self.has_pre_transformation: __a : int = nn.Linear(config.hidden_size , config.project_dim ) __a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Tuple = self.base_model( input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , ) if self.has_pre_transformation: __a : Optional[Any] = outputs['hidden_states'][-2] __a : Optional[int] = self.pre_LN(__a ) __a : Union[str, Any] = self.transformation_pre(__a ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __a : Optional[Any] = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
294
1
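A note on the size arithmetic the image-processor tests above exercise: get_expected_values follows the DETR-style aspect-ratio-preserving resize, where the short side is scaled toward shortest_edge and capped so the long side never exceeds longest_edge. A minimal sketch of that rule (the helper name expected_resize is illustrative, not part of the test suite):

def expected_resize(height, width, shortest_edge, longest_edge):
    # cap the target short side so the scaled long side stays within longest_edge
    short, long = min(height, width), max(height, width)
    target_short = min(shortest_edge, int(longest_edge * short / long))
    if height <= width:
        return target_short, int(target_short * width / height)
    return int(target_short * height / width), target_short

# Reproduces the shapes asserted above: a 480x640 COCO image with the default
# (800, 1333) bounds resizes to (800, 1066), and with the (42, 84) dict to (42, 56).
assert expected_resize(480, 640, 800, 1333) == (800, 1066)
assert expected_resize(480, 640, 42, 84) == (42, 56)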
'''simple docstring''' import pytest __lowercase : List[str] = '__dummy_dataset1__' __lowercase : List[Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def lowerCamelCase (): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowerCamelCase (): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] ): __a : str = dataset_loading_script_name __a : List[str] = tmp_path / 'datasets' / script_name script_dir.mkdir(parents=_SCREAMING_SNAKE_CASE ) __a : str = script_dir / F"""{script_name}.py""" with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(_SCREAMING_SNAKE_CASE ) return str(_SCREAMING_SNAKE_CASE )
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : Union[str, Any] = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
1
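The first sample above builds a temporary dataset-loading script for tests. A hedged sketch of how such a fixture is typically consumed (the fixture parameter name dataset_loading_script_dir is hypothetical, since the fixture functions in the sample are anonymized; the load_dataset call is the standard datasets API for a script directory):

import datasets

def test_dummy_dataset_loads(dataset_loading_script_dir):  # hypothetical fixture name
    # load_dataset accepts the path of a directory containing <script_name>.py
    ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
    # the features declared in DATASET_LOADING_SCRIPT_CODE above
    assert {"tokens", "ner_tags", "langs", "spans"} <= set(ds.column_names)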
'''simple docstring''' import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple=1_024 ): __a , __a : str = [], [] __a : Optional[Any] = list(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __a , __a : Any = sorted_examples[0] def is_too_big(_SCREAMING_SNAKE_CASE : Optional[Any] ): return tok(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): __a : int = new_src + ' ' + src __a : Optional[int] = new_tgt + ' ' + tgt if is_too_big(_SCREAMING_SNAKE_CASE ) or is_too_big(_SCREAMING_SNAKE_CASE ): # cant fit, finalize example finished_src.append(_SCREAMING_SNAKE_CASE ) finished_tgt.append(_SCREAMING_SNAKE_CASE ) __a , __a : Optional[Any] = src, tgt else: # can fit, keep adding __a , __a : str = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(_SCREAMING_SNAKE_CASE ) finished_tgt.append(_SCREAMING_SNAKE_CASE ) return finished_src, finished_tgt def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ): __a : Dict = Path(_SCREAMING_SNAKE_CASE ) save_path.mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) for split in ["train"]: __a , __a : str = data_dir / F"""{split}.source""", data_dir / F"""{split}.target""" __a : List[Any] = [x.rstrip() for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()] __a : Union[str, Any] = [x.rstrip() for x in Path(_SCREAMING_SNAKE_CASE ).open().readlines()] __a , __a : List[Any] = pack_examples(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F"""packed {split} split from {len(_SCREAMING_SNAKE_CASE )} examples -> {len(_SCREAMING_SNAKE_CASE )}.""" ) Path(save_path / F"""{split}.source""" ).open('w' ).write('\n'.join(_SCREAMING_SNAKE_CASE ) ) Path(save_path / F"""{split}.target""" ).open('w' ).write('\n'.join(_SCREAMING_SNAKE_CASE ) ) for split in ["val", "test"]: __a , __a : int = data_dir / F"""{split}.source""", data_dir / F"""{split}.target""" shutil.copyfile(_SCREAMING_SNAKE_CASE , save_path / F"""{split}.source""" ) shutil.copyfile(_SCREAMING_SNAKE_CASE , save_path / F"""{split}.target""" ) def lowerCamelCase (): __a : str = argparse.ArgumentParser() parser.add_argument('--tok_name' , type=_SCREAMING_SNAKE_CASE , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('--max_seq_len' , type=_SCREAMING_SNAKE_CASE , default=128 ) parser.add_argument('--data_dir' , type=_SCREAMING_SNAKE_CASE ) parser.add_argument('--save_path' , type=_SCREAMING_SNAKE_CASE ) __a : str = parser.parse_args() __a : Dict = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(_SCREAMING_SNAKE_CASE , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
294
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
294
1
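The apply_tf_padding helper in the MobileNetV1 sample above reproduces TensorFlow's "SAME" padding, which the model applies when config.tf_padding is set. A quick stand-alone check of that arithmetic (plain Python, independent of the model code):

def same_pad(in_size, stride, kernel):
    # total padding along one spatial dimension, per the TF "SAME" rule
    if in_size % stride == 0:
        total = max(kernel - stride, 0)
    else:
        total = max(kernel - (in_size % stride), 0)
    before = total // 2  # floor split, matching pad_left / pad_top above
    return before, total - before

# a 224-px side with stride 2 and a 3x3 kernel pads 0 before and 1 after
assert same_pad(224, 2, 3) == (0, 1)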
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' return f"""gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy""" def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() def __UpperCAmelCase ( self , __a=0 , __a=(4, 4, 64, 64) , __a=False ): '''simple docstring''' __a : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa __a : Tuple = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return image def __UpperCAmelCase ( self , __a=False , __a="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' __a : List[str] = jnp.bfloataa if fpaa else jnp.floataa __a : str = 'bf16' if fpaa else None __a , __a : Optional[Any] = FlaxUNetaDConditionModel.from_pretrained( __a , subfolder='unet' , dtype=__a , revision=__a ) return model, params def __UpperCAmelCase ( self , __a=0 , __a=(4, 77, 768) , __a=False ): '''simple docstring''' __a : Dict = jnp.bfloataa if fpaa else jnp.floataa __a : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a , __a : Optional[int] = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=__a ) __a : Any = self.get_latents(__a , fpaa=__a ) __a : Dict = self.get_encoder_hidden_states(__a , fpaa=__a ) __a : Optional[int] = model.apply( {'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __a : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __a : Any = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__a , __a , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a , __a : int = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=__a ) __a : int = self.get_latents(__a , shape=(4, 4, 96, 96) , fpaa=__a ) __a : Optional[Any] = self.get_encoder_hidden_states(__a , shape=(4, 77, 1024) , fpaa=__a ) __a : Optional[Any] = model.apply( {'params': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __a : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, 
:2].flatten()) ) , dtype=jnp.floataa ) __a : Optional[int] = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__a , __a , atol=1E-2 )
294
'''simple docstring''' import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __lowercase : str = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ): __a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse! __a : Optional[Any] = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } __a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ) __a : Dict = BeautifulSoup(html.text , 'html.parser' ) __a : List[str] = ''.join( re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) __a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE ) __a : List[str] = json.loads(_SCREAMING_SNAKE_CASE ) __a : List[Any] = re.findall( r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , ) if not matched_google_image_data: return 0 __a : Tuple = re.sub( r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , ) __a : Optional[Any] = re.findall( r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , ) for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ): if index >= max_images: return index __a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Dict = urllib.request.build_opener() __a : Union[str, Any] = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(_SCREAMING_SNAKE_CASE ) __a : List[Any] = F"""query_{query.replace(" " , "_" )}""" if not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) urllib.request.urlretrieve( # noqa: S310 _SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: __lowercase : Optional[int] = download_images_from_google_query(sys.argv[1]) print(f'''{image_count} images were downloaded to disk.''') except IndexError: print('Please provide a search term.') raise
294
1
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __lowercase : Dict = logging.get_logger(__name__) __lowercase : Optional[int] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __lowercase : Optional[int] = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __lowercase : Optional[int] = { 'facebook/blenderbot_small-90M': 5_12, } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = BlenderbotSmallTokenizer def __init__( self , __a=None , __a=None , __a="<|endoftext|>" , __a="<|endoftext|>" , __a="<|endoftext|>" , __a=False , __a=True , **__a , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=__a , merges=__a , add_prefix_space=__a , trim_offsets=__a , ) , bos_token=__a , eos_token=__a , unk_token=__a , **__a , ) __a : Dict = add_prefix_space def __UpperCAmelCase ( self , __a , __a=None ): '''simple docstring''' __a : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : Union[str, Any] = [self.sep_token_id] __a : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
294
'''simple docstring''' import os def lowerCamelCase (): with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + '/p022_names.txt' ) as file: __a : List[Any] = str(file.readlines()[0] ) __a : str = names.replace('"' , '' ).split(',' ) names.sort() __a : Union[str, Any] = 0 __a : Tuple = 0 for i, name in enumerate(_SCREAMING_SNAKE_CASE ): for letter in name: name_score += ord(_SCREAMING_SNAKE_CASE ) - 64 total_score += (i + 1) * name_score __a : Any = 0 return total_score if __name__ == "__main__": print(solution())
294
1
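As a worked check of the name-scoring rule in the names-scores sample above (the COLIN example is the one given in the Project Euler 22 problem statement):

name = "COLIN"
name_score = sum(ord(ch) - 64 for ch in name)  # A=1 ... Z=26, so 3+15+12+9+14
assert name_score == 53
# at 1-indexed position 938 in the sorted list, COLIN contributes 938 * 53
assert 938 * name_score == 49714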
'''simple docstring''' import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = BertTokenizer A_ = BertTokenizerFast A_ = True A_ = True A_ = filter_non_english def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() __a : Union[str, Any] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Union[str, Any] = 'UNwant\u00E9d,running' __a : List[str] = 'unwanted, running' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.tokenizer_class(self.vocab_file ) __a : int = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def __UpperCAmelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return __a : Union[str, Any] = self.get_tokenizer() __a : Dict = self.get_rust_tokenizer() __a : int = 'UNwant\u00E9d,running' __a : Tuple = tokenizer.tokenize(__a ) __a : List[Any] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __a : Tuple = tokenizer.encode(__a , add_special_tokens=__a ) __a : str = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) __a : Dict = self.get_rust_tokenizer() __a : int = tokenizer.encode(__a ) __a : Tuple = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # With lower casing __a : Optional[int] = self.get_tokenizer(do_lower_case=__a ) __a : Any = self.get_rust_tokenizer(do_lower_case=__a ) __a : List[str] = 'UNwant\u00E9d,running' __a : int = tokenizer.tokenize(__a ) __a : List[Any] = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __a : Any = tokenizer.encode(__a , add_special_tokens=__a ) __a : str = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) __a : Dict = self.get_rust_tokenizer() __a : Optional[Any] = tokenizer.encode(__a ) __a : Optional[int] = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = BasicTokenizer() __a : Union[str, Any] = 'a\n\'ll !!to?\'d of, can\'t.' __a : Any = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(__a ) , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __a : str = {} for i, token in enumerate(__a ): __a : Union[str, Any] = i __a : Dict = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' 
) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.get_tokenizer() __a : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__a ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(__a ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.tokenizer_class.from_pretrained('bert-base-uncased' ) __a : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __a : Tuple = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __a : Any = tokenizer.build_inputs_with_special_tokens(__a ) __a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __UpperCAmelCase ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : str = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __a : Dict = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __a : str = tokenizer_r.encode_plus( __a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , ) __a : Optional[int] = tokenizer_r.do_lower_case if hasattr(__a , 'do_lower_case' ) else False __a : int = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = ['的', '人', '有'] __a : Optional[Any] = ''.join(__a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : List[Any] = True __a : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a ) __a : Tuple = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __a : Optional[int] = tokenizer_p.encode(__a , add_special_tokens=__a ) __a : str = tokenizer_r.encode(__a , add_special_tokens=__a ) __a : Tuple = tokenizer_r.convert_ids_to_tokens(__a ) __a : Any = tokenizer_p.convert_ids_to_tokens(__a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__a , __a ) self.assertListEqual(__a , __a ) __a : Optional[int] = False __a : List[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __a : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a ) __a : Tuple = tokenizer_r.encode(__a , add_special_tokens=__a ) __a : Union[str, Any] = tokenizer_p.encode(__a , 
add_special_tokens=__a ) __a : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a ) __a : str = tokenizer_p.convert_ids_to_tokens(__a ) # it is expected that only the first Chinese character is not preceded by "##". __a : int = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a ) ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , __a )
294
'''simple docstring''' __lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __lowercase : List[str] = ['a', 'b', 'c', 'd', 'e'] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ): __a : Any = start # add current to visited visited.append(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # if all neighbors visited add current to sort sort.append(_SCREAMING_SNAKE_CASE ) # if all vertices haven't been visited select a new one to visit if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): for vertice in vertices: if vertice not in visited: __a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # return sort return sort if __name__ == "__main__": __lowercase : Union[str, Any] = topological_sort('a', [], []) print(sort)
294
1
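Tracing the depth-first topological sort sample above by hand on its module-level graph (edges a->c, a->b, b->d, b->e): each vertex is appended only after its neighbours, and the fallback loop picks up unvisited vertices mid-recursion, so the printed order lists dependencies before dependents:

# visit a -> c (appended), fallback reaches b -> d (appended),
# d's fallback reaches e (appended), then b and finally a are appended
expected_sort = ['c', 'd', 'e', 'b', 'a']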
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __UpperCamelCase ( unittest.TestCase ): @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __a : Dict = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Tuple = TFAutoModel.from_pretrained(__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Any = AutoModel.from_pretrained(__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __a : List[Any] = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Dict = TFAutoModelForPreTraining.from_pretrained(__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : str = AutoModelForPreTraining.from_pretrained(__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : int = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(__a , from_pt=__a ) __a , __a : Optional[int] = TFAutoModelForCausalLM.from_pretrained( __a , output_loading_info=__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Optional[int] = AutoModelForCausalLM.from_pretrained(__a , from_tf=__a ) __a , __a : Any = AutoModelForCausalLM.from_pretrained( __a , output_loading_info=__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : int = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : str = 
AutoModelWithLMHead.from_pretrained(__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : str = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : str = TFAutoModelForMaskedLM.from_pretrained(__a , from_pt=__a ) __a , __a : str = TFAutoModelForMaskedLM.from_pretrained( __a , output_loading_info=__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Any = AutoModelForMaskedLM.from_pretrained(__a , from_tf=__a ) __a , __a : Tuple = AutoModelForMaskedLM.from_pretrained( __a , output_loading_info=__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Tuple = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(__a , from_pt=__a ) __a , __a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( __a , output_loading_info=__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Tuple = AutoModelForSeqaSeqLM.from_pretrained(__a , from_tf=__a ) __a , __a : int = AutoModelForSeqaSeqLM.from_pretrained( __a , output_loading_info=__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __a : Optional[Any] = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __a : List[Any] = AutoConfig.from_pretrained(__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Any = TFAutoModelForQuestionAnswering.from_pretrained(__a , from_pt=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) __a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__a , from_tf=__a ) self.assertIsNotNone(__a ) self.assertIsInstance(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a ) self.assertIsInstance(__a , __a ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 ) __a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a ) self.assertIsInstance(__a , __a ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a ) self.assertIsInstance(__a , __a ) self.assertEqual(model.num_parameters() , 1_4410 ) self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 ) __a : List[Any] = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a ) self.assertIsInstance(__a , __a ) self.assertEqual(model.num_parameters() , 1_4410 ) 
self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_4410 )
294
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
294
1
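A quick illustration of the bitwise parity test in the sample above: an integer is even exactly when its least-significant bit is clear, and in Python this also holds for negative numbers thanks to infinite-precision two's-complement semantics:

assert (6 & 1) == 0  # 0b110 -> even
assert (7 & 1) == 1  # 0b111 -> odd
assert all((n & 1 == 0) == (n % 2 == 0) for n in range(-8, 9))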
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, 
module_spec=__spec__)
294
1
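Both __init__ samples above lean on the transformers _LazyModule pattern: nothing from a submodule is imported until one of its attributes is first accessed. A simplified sketch of the core idea (the real _LazyModule also handles __dir__, pickling, and module_spec):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        # runs only on first access; the actual import is deferred until here
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value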
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=3 , __a=True , __a=True , __a=0.1 , __a=0.1 , __a=224 , __a=1000 , __a=[3, 3, 6, 4] , __a=[48, 56, 112, 220] , ): '''simple docstring''' __a : Optional[int] = parent __a : Dict = batch_size __a : Dict = num_channels __a : str = is_training __a : Optional[int] = use_labels __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[Any] = num_labels __a : Tuple = image_size __a : Optional[Any] = layer_depths __a : Dict = embed_dims def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : int = None if self.use_labels: __a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __a : List[str] = self.get_config() return config, pixel_values, labels def __UpperCAmelCase ( self ): '''simple docstring''' return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__a , layer_scale_init_value=1E-5 , ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : int = SwiftFormerModel(config=__a ) model.to(__a ) model.eval() __a : Optional[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Tuple = self.num_labels __a : Optional[int] = SwiftFormerForImageClassification(__a ) model.to(__a ) model.eval() __a : int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __a : int = SwiftFormerForImageClassification(__a ) model.to(__a ) model.eval() __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Optional[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self ): '''simple docstring''' ((__a) , (__a) , (__a)) : Tuple = self.prepare_config_and_inputs() __a : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () A_ = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ 
= False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = SwiftFormerModelTester(self ) __a : Dict = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Optional[Any] = model_class(__a ) __a : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[Any] = model_class(__a ) __a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[int] = [*signature.parameters.keys()] __a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : int = SwiftFormerModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip(reason='SwiftFormer does not output attentions' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' def check_hidden_states_output(__a , __a , __a ): __a : Optional[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) ) __a : Any = outputs.hidden_states __a : Tuple = 8 self.assertEqual(len(__a ) , __a ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__a ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Any = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : Union[str, Any] = True check_hidden_states_output(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' def _config_zero_init(__a ): __a : List[Any] = copy.deepcopy(__a ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__a , __a , 1E-1_0 ) if isinstance(getattr(__a , __a , __a ) , __a ): __a : List[Any] = _config_zero_init(getattr(__a , __a ) ) setattr(__a , __a , __a ) return configs_no_init __a , __a : Dict = 
self.model_tester.prepare_config_and_inputs_for_common() __a : Optional[Any] = _config_zero_init(__a ) for model_class in self.all_model_classes: __a : Tuple = model_class(config=__a ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def lowerCamelCase (): __a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(__a ) __a : Optional[Any] = self.default_image_processor __a : Tuple = prepare_img() __a : int = image_processor(images=__a , return_tensors='pt' ).to(__a ) # forward pass with torch.no_grad(): __a : Optional[Any] = model(**__a ) # verify the logits __a : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : int = torch.tensor([[-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0]] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
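A quick, self-contained sketch of the shape arithmetic behind the hidden-states check in the test above; `image_size = 224` and eight hidden states mirror the tester's defaults, and the names here are illustrative only.

image_size = 224  # tester default above
num_hidden_states = 8
# width/height start at image_size // 4 and halve after every two blocks:
sides = [(image_size // 4) // 2 ** (i // 2) for i in range(num_hidden_states)]
print(sides)  # [56, 56, 28, 28, 14, 14, 7, 7] -- the final 7x7 matches the model test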
294
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) 
self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
294
1
'''simple docstring'''
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
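A short usage sketch for the helper above; the dataset id and file path are hypothetical examples, not taken from the sample itself.

url = hf_hub_url('squad', 'plain_text/train-00000-of-00001.parquet')  # hypothetical file
print(url)  # a https://huggingface.co/datasets/squad/resolve/... URL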
294
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : int = parent __a : Union[str, Any] = batch_size __a : Optional[int] = seq_length __a : List[str] = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : List[str] = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : str = num_attention_heads __a : Optional[int] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Dict = type_vocab_size __a : Any = type_sequence_label_size __a : Dict = initializer_range __a : Optional[Any] = num_labels __a : Optional[Any] = num_choices __a : Union[str, Any] = relative_attention __a : List[str] = position_biased_input __a : List[Any] = pos_att_type __a : Tuple = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : Any = None if self.use_token_type_ids: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : int = None __a : Dict = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def 
__UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0] __a : str = model(__a , token_type_ids=__a )[0] __a : Optional[int] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : int = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[Any] = self.num_labels __a : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[str] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __a : str = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : int = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = config_and_inputs __a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, 
"token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = DebertaVaModelTester(self ) __a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : str = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @unittest.skip(reason='Model not available yet' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __a : int = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __a : str = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
294
1
'''simple docstring'''
def heaps(arr: list) -> list:
    """Generate all permutations of `arr` with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
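A quick cross-check of the generator above against the standard library: three elements give 3! = 6 permutations, and Heap's algorithm visits the same set as itertools.permutations, though generally in a different order.

from itertools import permutations

perms = heaps([1, 2, 3])
print(len(perms))  # 6
assert sorted(perms) == sorted(permutations([1, 2, 3]))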
294
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ): return False return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ): __a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[Any] = model __a : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Union[str, Any] = model.module if not keep_fpaa_wrapper: __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' ) __a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ): __a : Any = forward.__wrapped__ if forward == original_forward: break __a : str = forward if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ): convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[str] = model __a : Optional[int] = compiled_model return model def lowerCamelCase (): PartialState().wait_for_everyone() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): if PartialState().distributed_type == DistributedType.TPU: xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @contextmanager def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ): for key, value in kwargs.items(): __a : Optional[int] = str(_SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ): return obj.__qualname__ if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): return obj.__name__ return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): for key, value in source.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} ) merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a : Tuple = value return destination def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ): if port is None: __a : List[str] = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
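A minimal sketch of the model-unwrapping loop from the sample above, assuming a CPU-only model wrapped in torch.nn.DataParallel; the DeepSpeed and compiled-module branches are omitted.

import torch

model = torch.nn.Linear(2, 2)
wrapped = torch.nn.DataParallel(model)  # construction works even without GPUs

options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
unwrapped = wrapped
while isinstance(unwrapped, options):
    unwrapped = unwrapped.module  # peel off one wrapper layer at a time
assert unwrapped is model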
294
1
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ): __a , __a : Dict = image.size __a , __a : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a : str = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) __a : Optional[int] = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 2_5_5.0 __a : str = image[None].transpose(0 , 3 , 1 , 2 ) __a : str = torch.from_numpy(_SCREAMING_SNAKE_CASE ) return 2.0 * image - 1.0 class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a , __a , ): '''simple docstring''' super().__init__() self.register_modules(vqvae=__a , unet=__a , scheduler=__a ) @torch.no_grad() def __call__( self , __a = None , __a = 1 , __a = 100 , __a = 0.0 , __a = None , __a = "pil" , __a = True , ): '''simple docstring''' if isinstance(__a , PIL.Image.Image ): __a : Union[str, Any] = 1 elif isinstance(__a , torch.Tensor ): __a : Any = image.shape[0] else: raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__a )}""" ) if isinstance(__a , PIL.Image.Image ): __a : List[str] = preprocess(__a ) __a , __a : List[Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a : str = (batch_size, self.unet.config.in_channels // 2, height, width) __a : Tuple = next(self.unet.parameters() ).dtype __a : Tuple = randn_tensor(__a , generator=__a , device=self.device , dtype=__a ) __a : Union[str, Any] = image.to(device=self.device , dtype=__a ) # set timesteps and move to the correct device self.scheduler.set_timesteps(__a , device=self.device ) __a : Dict = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a : Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a : str = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a : str = {} if accepts_eta: __a : Optional[Any] = eta for t in self.progress_bar(__a ): # concat latents and low resolution image in the channel dimension. __a : Optional[Any] = torch.cat([latents, image] , dim=1 ) __a : Any = self.scheduler.scale_model_input(__a , __a ) # predict the noise residual __a : int = self.unet(__a , __a ).sample # compute the previous noisy sample x_t -> x_t-1 __a : Optional[int] = self.scheduler.step(__a , __a , __a , **__a ).prev_sample # decode the image latents with the VQVAE __a : Union[str, Any] = self.vqvae.decode(__a ).sample __a : List[Any] = torch.clamp(__a , -1.0 , 1.0 ) __a : Dict = image / 2 + 0.5 __a : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a : Union[str, Any] = self.numpy_to_pil(__a ) if not return_dict: return (image,) return ImagePipelineOutput(images=__a )
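The preprocessing convention used by the pipeline above, checked in isolation: resize to a multiple of 32, then map pixel values from [0, 255] to [-1, 1]. The blank test image is illustrative.

import numpy as np
import PIL.Image

image = PIL.Image.new('RGB', (100, 70))
w, h = (x - x % 32 for x in image.size)  # -> (96, 64)
arr = np.array(image.resize((w, h))).astype(np.float32) / 255.0
arr = arr[None].transpose(0, 3, 1, 2)    # NHWC -> NCHW with a batch axis
tensor = 2.0 * arr - 1.0                 # values now lie in [-1.0, 1.0]
print(tensor.shape)                      # (1, 3, 64, 96)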
294
'''simple docstring''' from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
294
1
'''simple docstring''' # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=0 ): # Format the message. if name is None: __a : Optional[Any] = None else: __a : List[str] = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}' __a : str = fmt.format(_SCREAMING_SNAKE_CASE ) # Print and recurse (if needed). if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if msg is not None: print(_SCREAMING_SNAKE_CASE ) for k in val.keys(): recursive_print(_SCREAMING_SNAKE_CASE , val[k] , spaces + 2 ) elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): print(_SCREAMING_SNAKE_CASE , ':' , val.size() ) else: print(_SCREAMING_SNAKE_CASE , ':' , _SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str ): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. 
__a : List[str] = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] __a : Dict = (num_heads, hidden_size, num_splits) + input_shape[1:] __a : Any = param.view(*_SCREAMING_SNAKE_CASE ) __a : Optional[Any] = param.transpose(0 , 2 ) __a : str = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] __a : Any = (num_heads, num_splits, hidden_size) + input_shape[1:] __a : Optional[int] = param.view(*_SCREAMING_SNAKE_CASE ) __a : Dict = param.transpose(0 , 1 ).contiguous() __a : str = param.view(*_SCREAMING_SNAKE_CASE ) return param def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ): # The converted output model. __a : str = {} # old versions did not store training args __a : Tuple = input_state_dict.get('args' , _SCREAMING_SNAKE_CASE ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) __a : Union[str, Any] = ds_args.padded_vocab_size __a : Optional[int] = ds_args.max_position_embeddings __a : int = ds_args.hidden_size __a : int = ds_args.num_layers __a : List[Any] = ds_args.num_attention_heads __a : Tuple = ds_args.ffn_hidden_size # pprint(config) # The number of heads. __a : Union[str, Any] = config.n_head # The hidden_size per head. __a : Optional[Any] = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): __a : int = input_state_dict['checkpoint_version'] else: __a : Tuple = 0.0 # The model. __a : int = input_state_dict['model'] # The language model. __a : Dict = model['language_model'] # The embeddings. __a : Tuple = lm['embedding'] # The word embeddings. __a : Optional[int] = embeddings['word_embeddings']['weight'] # Truncate the embedding table to vocab_size rows. __a : List[str] = word_embeddings[: config.vocab_size, :] __a : int = word_embeddings # The position embeddings. __a : int = embeddings['position_embeddings']['weight'] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] __a : Tuple = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" ) # Store the position embeddings. __a : Any = pos_embeddings # The transformer. __a : Dict = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder'] # The regex to extract layer names. __a : Any = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' ) # The simple map of names for "automated" rules. __a : Any = { 'attention.dense': '.attn.c_proj.', 'self_attention.dense': '.attn.c_proj.', 'mlp.dense_h_to_4h': '.mlp.c_fc.', 'mlp.dense_4h_to_h': '.mlp.c_proj.', } # Extract the layers. for key, val in transformer.items(): # Match the name. __a : Tuple = layer_re.match(_SCREAMING_SNAKE_CASE ) # Stop if that's not a layer if m is None: break # The index of the layer. __a : Optional[int] = int(m.group(1 ) ) # The name of the operation. __a : Tuple = m.group(2 ) # Is it a weight or a bias? __a : Optional[int] = m.group(3 ) # The name of the layer. __a : Dict = F"""transformer.h.{layer_idx}""" # For layernorm(s), simply store the layer norm. 
if op_name.endswith('layernorm' ): __a : Union[str, Any] = 'ln_1' if op_name.startswith('input' ) else 'ln_2' __a : Tuple = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. __a : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : str = causal_mask # Insert a "dummy" tensor for masked_bias. __a : int = torch.tensor(-1e4 , dtype=torch.floataa ) __a : Any = masked_bias __a : List[str] = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. __a : int = out_val.transpose(0 , 1 ).contiguous() # Store. __a : List[Any] = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": __a : Any = fix_query_key_value_ordering(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 3 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Store. No change of shape. __a : Any = out_val # Transpose the weights. elif weight_or_bias == "weight": __a : Tuple = megatron_to_transformers[op_name] __a : Union[str, Any] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": __a : Any = megatron_to_transformers[op_name] __a : List[Any] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. __a : int = transformer['final_layernorm.weight'] __a : int = transformer['final_layernorm.bias'] # For LM head, transformers' wants the matrix to weight embeddings. __a : Optional[int] = word_embeddings # It should be done! return output_state_dict def lowerCamelCase (): # Create the argument parser. __a : int = argparse.ArgumentParser() parser.add_argument('--print-checkpoint-structure' , action='store_true' ) parser.add_argument( 'path_to_checkpoint' , type=_SCREAMING_SNAKE_CASE , help='Path to the checkpoint file (.zip archive or direct .pt file)' , ) parser.add_argument( '--config_file' , default='' , type=_SCREAMING_SNAKE_CASE , help='An optional config json file describing the pre-trained model.' , ) __a : Tuple = parser.parse_args() # Extract the basename. __a : Union[str, Any] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" ) if args.path_to_checkpoint.endswith('.zip' ): with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint: with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict: __a : int = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) else: __a : str = torch.load(args.path_to_checkpoint , map_location='cpu' ) __a : int = input_state_dict.get('args' , _SCREAMING_SNAKE_CASE ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: __a : Optional[Any] = 'gelu_fast' elif ds_args.openai_gelu: __a : List[Any] = 'gelu_new' else: __a : Any = 'gelu' else: # in the very early days this used to be "gelu_new" __a : Any = 'gelu_new' # Spell out all parameters in case the defaults change. 
__a : Tuple = GPTaConfig( vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_SCREAMING_SNAKE_CASE , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type='cls_index' , summary_use_proj=_SCREAMING_SNAKE_CASE , summary_activation=_SCREAMING_SNAKE_CASE , summary_proj_to_labels=_SCREAMING_SNAKE_CASE , summary_first_dropout=0.1 , scale_attn_weights=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=50_256 , eos_token_id=50_256 , ) else: __a : Optional[Any] = GPTaConfig.from_json_file(args.config_file ) __a : Dict = ['GPT2LMHeadModel'] # Convert. print('Converting' ) __a : Optional[int] = convert_megatron_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: __a : Dict = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": __a : List[Any] = 'gpt2' elif tokenizer_type == "PretrainedFromHF": __a : int = ds_args.tokenizer_name_or_path else: raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" ) else: __a : Union[str, Any] = 'gpt2' __a : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = type(_SCREAMING_SNAKE_CASE ).__name__ __a : str = tokenizer_class # Store the config to file. print('Saving config' ) config.save_pretrained(_SCREAMING_SNAKE_CASE ) # Save tokenizer based on args print(F"""Adding {tokenizer_class} tokenizer files""" ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) # Store the state_dict to file. __a : str = os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
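A toy illustration of the QKV re-ordering performed in the conversion script above for checkpoint_version >= 2.0; the tiny dimensions are purely illustrative.

import torch

num_heads, num_splits, hidden_size = 2, 3, 4
param = torch.arange(num_heads * num_splits * hidden_size, dtype=torch.float32).view(-1, 1)

input_shape = param.size()
saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
# [num_heads * num_splits * hidden_size, :] -> [num_splits * num_heads * hidden_size, :]
out = param.view(*saved_shape).transpose(0, 1).contiguous().view(*input_shape)
print(out.shape)  # torch.Size([24, 1])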
294
'''simple docstring'''
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree_root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree_root):
        raise ValueError('Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree_root, -float('inf'), float('inf'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
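A small usage sketch for the validator above; the tree values are illustrative.

valid = TreeNode(6.0, TreeNode(2.0, TreeNode(1.0), TreeNode(4.0)), TreeNode(7.0))
print(is_binary_search_tree(valid))  # True

invalid = TreeNode(6.0, TreeNode(2.0, TreeNode(1.0), TreeNode(9.0)), TreeNode(7.0))
print(is_binary_search_tree(invalid))  # False: 9.0 violates the (2.0, 6.0) bound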
294
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowercase : Dict = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
294
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, 
module_spec=__spec__)
294
'''simple docstring'''
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences, one per line."""
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
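Example use of the splitter above, assuming nltk and its 'punkt' model are available; the input string is illustrative.

print(add_newline_to_end_of_each_sentence('Pegasus is mythical.<n> It names a model.'))
# Pegasus is mythical.
# It names a model.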
294
1
'''simple docstring'''
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to `num` (inclusive)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
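An example run of the sieve above.

print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]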
294
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
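On a shared universe of discourse, the fuzzy union and intersection computed above should reduce to element-wise max/min, which can be cross-checked with plain NumPy; this is a sketch, not part of the original demo.

import numpy as np
import skfuzzy as fuzz

X = np.linspace(0, 75, 75)
young = fuzz.membership.trimf(X, [0, 25, 50])
middle_aged = fuzz.membership.trimf(X, [25, 50, 75])

union_mf = np.fmax(young, middle_aged)         # max(µA(x), µB(x))
intersection_mf = np.fmin(young, middle_aged)  # min(µA(x), µB(x))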
294
1
'''simple docstring'''
def method_a(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
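A sanity check for the trapezoidal rule above: with f(x) = x² on [0, 1] and 10 steps it should land near the exact integral of 1/3 (the final digits depend on floating-point accumulation in make_points).

print(method_a([0.0, 1.0], 10.0))  # ≈ 0.335, vs the exact value 1/3 ≈ 0.3333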
294
'''simple docstring'''
import sys

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def str_eval(s: str) -> int:
    """Return the product of the digits in `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f'{solution() = }')
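Example calls for the two functions above: str_eval is exact digit arithmetic, and solution() scans the 1000-digit number N for the best 13-digit window.

print(str_eval('9989'))  # 9 * 9 * 8 * 9 = 5832
print(solution())        # greatest product of 13 adjacent digits in N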
294
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) 
self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
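The tests above pin down how `ClapProcessor` splits work between its tokenizer (text) and feature extractor (audio). A minimal usage sketch, assuming the same public checkpoint as the tests; the audio here is synthetic noise rather than a real recording:

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
audio = np.random.randn(48_000)  # ~1 s of mono audio (48 kHz assumed)
inputs = processor(text='the sound of a dog', audios=audio, return_tensors='pt')
print(sorted(inputs.keys()))  # tokenizer fields plus the extractor's 'input_features'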
294
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = CodeGenTokenizer A_ = CodeGenTokenizerFast A_ = True A_ = {"add_prefix_space": True} A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __a : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) ) __a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : Dict = {'unk_token': '<unk>'} __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__a ) ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = 'lower newer' __a : Tuple = 'lower newer' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __a : str = 'lower newer' __a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) __a : List[str] = tokens + [tokenizer.unk_token] __a : Any = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return __a : List[Any] = self.get_tokenizer() __a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Any = 'lower newer' # Testing tokenization __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) __a : Dict = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens __a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) __a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with special tokens __a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a ) __a : int = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token __a : Any = tokens + [rust_tokenizer.unk_token] __a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' pass def __UpperCAmelCase ( self 
, __a=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input __a : List[Any] = 'This is a simple input' __a : Tuple = ['This is a simple input 1', 'This is a simple input 2'] __a : Tuple = ('This is a simple input', 'This is a pair') __a : str = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __a : str = 'This is a simple input' __a : Any = ['This is a simple input looooooooong', 'This is a simple input'] __a : Optional[int] = ('This is a simple input', 'This is a pair') __a : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __a : int = tokenizer.pad_token_id __a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' ) __a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) __a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' ) __a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = '$$$' __a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) __a : Union[str, Any] = 'This is a simple input' __a : List[Any] = ['This is a 
simple input 1', 'This is a simple input 2'] __a : List[Any] = tokenizer.bos_token_id __a : List[str] = tokenizer(__a ) __a : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __a : Any = tokenizer.decode(out_s.input_ids ) __a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) __a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' __a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b' __a : Optional[int] = tokenizer.encode(__a ) __a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] __a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a ) self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass
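The slow test above exercises `truncate_before_pattern`, which cuts the decoded string at the first regex match. The same call outside the test harness, with the checkpoint and strings copied from the test (indentation approximated):

import re
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
text = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'
ids = tokenizer.encode(text)
patterns = ['^#', re.escape('<|endoftext|>'), "^'''", '^"""', '\n\n\n']
print(tokenizer.decode(ids, truncate_before_pattern=patterns))
# -> the snippet up to (but not including) the trailing blank lines and '#'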
294
1
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __lowercase : Dict = logging.getLogger(__name__) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a=-1 ): '''simple docstring''' __a : Optional[Any] = label_idx def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if isinstance(__a , __a ): __a : List[Any] = mode.value __a : Any = os.path.join(__a , f"""{mode}.txt""" ) __a : str = 1 __a : List[Any] = [] with open(__a , encoding='utf-8' ) as f: __a : Tuple = [] __a : List[Any] = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) ) guid_index += 1 __a : Dict = [] __a : Optional[Any] = [] else: __a : str = line.split(' ' ) words.append(splits[0] ) if len(__a ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) ) return examples def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : Optional[Any] = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(__a ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __a : int = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(__a ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if path: with open(__a , 'r' ) as f: __a : Optional[int] = f.read().splitlines() if "O" not in labels: __a : int = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self ): '''simple docstring''' super().__init__(label_idx=-2 ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if path: with open(__a , 'r' ) as f: __a : Optional[int] = f.read().splitlines() if "O" not in labels: __a : Dict = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' if isinstance(__a , __a ): __a : Any = mode.value __a : int = os.path.join(__a , f"""{mode}.txt""" ) __a : Optional[Any] = 1 __a : List[Any] = [] with open(__a , encoding='utf-8' ) as f: for sentence in parse_incr(__a ): __a : Tuple = [] __a : List[Any] = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(__a ) == len(__a ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) ) guid_index += 1 return examples def __UpperCAmelCase ( self , __a , __a , __a ): '''simple docstring''' __a : List[Any] = 0 for sentence in parse_incr(__a ): __a : List[Any] = preds_list[example_id] __a : List[Any] = '' for token in sentence: out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """ out += "\n" writer.write(__a ) example_id += 1 def __UpperCAmelCase ( self , __a ): '''simple docstring''' if path: with open(__a , 'r' ) 
as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
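The readers above consume CoNLL-style files: one `token label` pair per line, blank lines between sentences, and optional `-DOCSTART-` markers. A tiny hypothetical file in that shape (tokens and labels are made up, and the path is a placeholder):

sample = (
    '-DOCSTART- O\n'
    '\n'
    'EU B-ORG\n'
    'rejects O\n'
    'German B-MISC\n'
    'call O\n'
    '\n'
    'Peter B-PER\n'
    'Blackburn I-PER\n'
)
with open('/tmp/train.txt', 'w', encoding='utf-8') as f:
    f.write(sample)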
294
'''simple docstring'''
def lowerCamelCase(number: int, iterations: int):
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
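Example call for the helper above, using the fixed `(number, iterations)` signature:

print(lowerCamelCase(1, 10))  # '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz '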
294
1
'''simple docstring'''
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
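A hypothetical command line for the script above; the script name and all three paths are placeholders:

# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --config_file /path/to/lxmert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin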
294
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {'height': 18, 'width': 18} __a : int = parent __a : Dict = batch_size __a : Optional[int] = num_channels __a : List[Any] = image_size __a : Tuple = min_resolution __a : str = max_resolution __a : str = do_resize __a : Optional[Any] = size __a : str = apply_ocr def __UpperCAmelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = LayoutLMvaImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , 'do_resize' ) ) self.assertTrue(hasattr(__a , 'size' ) ) self.assertTrue(hasattr(__a , 'apply_ocr' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) __a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __a ) self.assertIsInstance(encoding.boxes , __a ) # Test batched __a : Any = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __a : Optional[Any] = 
image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset __a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) __a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' ) __a : Optional[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 
'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 __a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 
726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a ) self.assertListEqual(encoding.boxes , __a ) # with apply_OCR = False __a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a ) __a : List[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
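A sketch of the two OCR modes the last test toggles. The masked class name `LayoutLMvaImageProcessor` hides a version digit; `LayoutLMv3ImageProcessor` is one released class with this interface, and `apply_ocr=True` additionally needs a local Tesseract install:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image = Image.new('RGB', (640, 480), color='white')  # placeholder image

with_ocr = LayoutLMv3ImageProcessor()                   # apply_ocr defaults to True
without_ocr = LayoutLMv3ImageProcessor(apply_ocr=False)

print(with_ocr(image, return_tensors='pt').keys())     # pixel_values, words, boxes
print(without_ocr(image, return_tensors='pt').keys())  # pixel_values only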
294
1
'''simple docstring'''
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
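Sanity check for the rotation helpers above: four quarter turns, or two half turns, reproduce the original matrix:

m = make_matrix(3)
assert rotate_90(rotate_90(rotate_90(rotate_90(m)))) == m
assert rotate_180(rotate_180(m)) == m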
294
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __lowercase : List[Any] = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "ernie_m" A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , **__a ) __a : int = vocab_size __a : Dict = hidden_size __a : str = num_hidden_layers __a : Dict = num_attention_heads __a : List[str] = intermediate_size __a : Union[str, Any] = hidden_act __a : List[Any] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Any = max_position_embeddings __a : int = initializer_range __a : Dict = layer_norm_eps __a : int = classifier_dropout __a : Dict = is_decoder __a : int = act_dropout
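The class above mirrors `ErnieMConfig` from released transformers. Assuming that published class, a default configuration can be built and inspected like this:

from transformers import ErnieMConfig

config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
print(config.model_type)     # 'ernie_m'
print(config.attribute_map)  # {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}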
294
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=__a ).to(__a ) __a : str = AutoTokenizer.from_pretrained('google/mt5-small' ) __a : int = tokenizer('Hello there' , return_tensors='pt' ).input_ids __a : str = tokenizer('Hi I am' , return_tensors='pt' ).input_ids __a : Optional[int] = model(input_ids.to(__a ) , labels=labels.to(__a ) ).loss __a : Optional[int] = -(labels.shape[-1] * loss.item()) __a : Any = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
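The score checked above is the total log-likelihood of the label sequence: `loss` is the mean per-token cross-entropy, so multiplying by the number of label tokens and negating recovers log p(labels | input). The arithmetic, with made-up numbers of the right order:

seq_len = 6          # labels.shape[-1] (hypothetical)
mean_nll = 14.1521   # loss.item() (hypothetical)
log_likelihood = -(seq_len * mean_nll)
print(log_likelihood)  # -84.9126, the magnitude the test's EXPECTED_SCORE has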
294
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() __a : int = module __a : List[Any] = nn.Sequential( nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , ) __a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__a ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' return self.module(__a , *__a , **__a ) + self.adapter(__a ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module A_ = "bigscience/bloom-1b7" # Constant values A_ = 2.109659552692574 A_ = "Hello my name is" A_ = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) A_ = 10 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = AutoTokenizer.from_pretrained(self.model_name ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_abit.config self.assertTrue(hasattr(__a , 'quantization_config' ) ) __a : Union[str, Any] = config.to_dict() __a : Tuple = config.to_diff_dict() __a : Tuple = config.to_json_string() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __a : List[Any] = self.model_fpaa.get_memory_footprint() __a : List[Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __a : Tuple = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__a , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BitsAndBytesConfig() __a : Tuple = True __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ) __a : List[Any] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BitsAndBytesConfig() with self.assertRaises(__a ): __a : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(__a ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__a ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with 
self.assertRaises(__a ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__a ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Optional[int] = self.model_fpaa.to(torch.floataa ) __a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __a : List[Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.half() # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.float() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : Any = 't5-small' __a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __a : int = AutoTokenizer.from_pretrained(cls.model_name ) __a : Union[str, Any] = 'Translate in German: Hello, my dog is cute' def __UpperCAmelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules __a : List[str] = None # test with `t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : Any = model.generate(**__a ) # test with `flan-t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[Any] = model.generate(**__a ) __a : Optional[int] = modules def __UpperCAmelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[str] = model.generate(**__a ) # test with `flan-t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : int = model.generate(**__a ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # model_name __a : List[Any] = 'bigscience/bloom-560m' __a : Union[str, Any] = 't5-small' # Different types of model __a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Sequence classification model __a : Dict = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__a , device_map='auto' ) # CausalLM model 
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Seq2seq model __a : Any = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __a : str = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__a , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 'facebook/opt-350m' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __a : Tuple = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __a : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__a ) ): __a : str = LoRALayer(module.q_proj , rank=16 ) __a : str = LoRALayer(module.k_proj , rank=16 ) __a : Optional[int] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __a : int = model.forward(**__a ) out.logits.norm().backward() for module in model.modules(): if isinstance(__a , __a ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__a , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "gpt2-xl" A_ = 3.3191854854152187
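The tests above mask the quantization flag as `load_in_abit`; in released transformers the corresponding arguments are `load_in_8bit` and `load_in_4bit`. A minimal loading sketch of the pattern being tested (needs a CUDA GPU plus the `bitsandbytes` and `accelerate` packages):

from transformers import AutoModelForCausalLM, AutoTokenizer

name = 'bigscience/bloom-1b7'
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, load_in_8bit=True, device_map='auto')
inputs = tokenizer('Hello my name is', return_tensors='pt').to(0)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))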
294
1
'''simple docstring'''
import numpy as np


def lowerCamelCase(vector: np.ndarray, alpha: float):
    # Exponential linear unit: identity for positive values,
    # alpha * (exp(x) - 1) for non-positive values.
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
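Quick numeric check of the activation above: positive inputs pass through unchanged, non-positive inputs are squashed toward -alpha:

print(lowerCamelCase(np.array([-2.0, 0.0, 3.0]), alpha=1.0))
# ≈ [-0.8647  0.      3.    ]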
294
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __UpperCamelCase ( lowerCAmelCase_ ): A_ = None A_ = None A_ = None A_ = None class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) __a : Any = project_dim __a : Optional[Any] = pooler_fn __a : int = learn_encoder __a : str = use_attention_mask class __UpperCamelCase ( lowerCAmelCase_ ): A_ = [r"pooler", r"logit_scale"] A_ = [r"position_ids", r"predictions.decoder.bias"] A_ = "roberta" A_ = RobertaSeriesConfig def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Optional[Any] = XLMRobertaModel(__a ) __a : str = nn.Linear(config.hidden_size , config.project_dim ) __a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a ) if self.has_pre_transformation: __a : int = nn.Linear(config.hidden_size , config.project_dim ) __a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Tuple = self.base_model( input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , ) if self.has_pre_transformation: __a : Optional[Any] = outputs['hidden_states'][-2] __a : Optional[int] = self.pre_LN(__a ) __a : Union[str, Any] = self.transformation_pre(__a ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __a : Optional[Any] = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
294
1
'''simple docstring''' import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class __UpperCamelCase : def __init__( self , __a , __a=None , __a=None , __a=None , __a="resnet50" , __a=3 , __a=32 , __a=3 , __a=True , __a=True , ): '''simple docstring''' __a : List[Any] = parent __a : Dict = out_indices if out_indices is not None else [4] __a : str = stage_names __a : int = out_features __a : List[str] = backbone __a : List[Any] = batch_size __a : Tuple = image_size __a : Dict = num_channels __a : int = use_pretrained_backbone __a : int = is_training def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Any = self.get_config() return config, pixel_values def __UpperCAmelCase ( self ): '''simple docstring''' return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' __a : List[Any] = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): __a : int = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.prepare_config_and_inputs() __a , __a : int = config_and_inputs __a : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TimmBackbone,) if is_torch_available() else () A_ = {"feature-extraction": TimmBackbone} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = TimmBackboneModelTester(self ) __a : Any = ConfigTester(self , config_class=__a , has_text_modality=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = 'resnet18' __a : str = 'microsoft/resnet-18' __a : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) __a : str = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the 
last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __a : Optional[int] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) __a : str = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('Safetensors is not supported by timm.' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Optional[int] = model_class(__a ) __a : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Optional[Any] = [*signature.parameters.keys()] __a : Union[str, Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __a : Optional[Any] = True __a : Optional[Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __a : Tuple = self.all_model_classes[0] __a : int = model_class(__a ) model.to(__a ) __a : int = self._prepare_for_class(__a , __a ) __a : Any = model(**__a ) __a : Tuple = outputs[0][-1] # Encoder-/Decoder-only models __a : List[str] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __a : Union[str, Any] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : int = model_class(__a ) model.to(__a ) model.eval() __a : List[str] = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __a : Dict = copy.deepcopy(__a ) __a : Tuple = None __a : Any = model_class(__a ) model.to(__a ) model.eval() __a : List[str] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __a : Union[str, Any] = copy.deepcopy(__a ) __a : Union[str, Any] = False __a : Any = model_class(__a ) model.to(__a ) model.eval() __a : Union[str, Any] = model(**__a )
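A sketch of the timm/transformers backbone parity these tests assert; both lines load a ResNet-18, differing only in where the weights come from (network access required):

from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained('resnet18', use_timm_backbone=True)
hf_backbone = AutoBackbone.from_pretrained('microsoft/resnet-18')
print(timm_backbone.out_indices)  # (-1,): last stage, resolved lazily by timm
print(hf_backbone.out_indices)    # [len(stage_names) - 1]: explicit last index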
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : Union[str, Any] = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
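The init above follows transformers' lazy-module pattern: the package module is swapped for a `_LazyModule`, and the heavy submodules are imported only on first attribute access. A rough sketch of the observable effect in a fresh interpreter (assuming a transformers build that ships RoCBert):

import sys

import transformers

# Nothing from modeling_roc_bert has been imported yet at this point.
assert 'transformers.models.roc_bert.modeling_roc_bert' not in sys.modules

_ = transformers.RoCBertModel  # attribute access triggers the deferred import
assert 'transformers.models.roc_bert.modeling_roc_bert' in sys.modules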
294
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __lowercase : List[str] = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[Any] = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
294
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowercase : Optional[Any] = logging.get_logger(__name__) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int]=False ): __a : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'deit.embeddings.cls_token'), ('dist_token', 'deit.embeddings.distillation_token'), ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'deit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" __a : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('norm.weight', 'deit.layernorm.weight'), ('norm.bias', 'deit.layernorm.bias'), ('head.weight', 'cls_classifier.weight'), ('head.bias', 'cls_classifier.bias'), ('head_dist.weight', 'distillation_classifier.weight'), ('head_dist.bias', 'distillation_classifier.bias'), ] ) return rename_keys def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any=False ): for i in range(config.num_hidden_layers ): if base_model: __a : List[Any] = '' else: __a : Union[str, Any] = 'deit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __a : List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) __a : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __a : List[Any] = in_proj_weight[ : config.hidden_size, : ] __a : List[Any] = in_proj_bias[: config.hidden_size] __a : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __a : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __a : Optional[int] = in_proj_weight[ -config.hidden_size :, : ] __a : Tuple = in_proj_bias[-config.hidden_size :] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ): __a : Union[str, Any] = dct.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = val def lowerCamelCase (): __a : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg' __a : Optional[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ): __a : Optional[Any] = DeiTConfig() # all deit models have fine-tuned heads __a : int = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size __a : str = 1_000 __a : str = 'huggingface/label-files' __a : Union[str, Any] = 'imagenet-1k-id2label.json' __a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __a : List[Any] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __a : List[str] = idalabel __a : Optional[int] = {v: k for k, v in idalabel.items()} __a : Union[str, Any] = int(deit_name[-6:-4] ) __a : List[str] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('tiny' ): __a : Dict = 192 __a : List[Any] = 768 __a : Optional[int] = 12 __a : Any = 3 elif deit_name[9:].startswith('small' ): __a : Union[str, Any] = 384 __a : List[str] = 1_536 __a : List[str] = 12 __a : int = 6 if deit_name[9:].startswith('base' ): pass elif deit_name[4:].startswith('large' ): __a : str = 1_024 __a : Tuple = 4_096 __a : Any = 24 __a : Any = 16 # load original model from timm __a : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys __a : Union[str, Any] = timm_model.state_dict() __a : str = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model __a : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by DeiTImageProcessor __a : Optional[Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 __a : List[Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE , crop_size=config.image_size ) __a : List[Any] = image_processor(images=prepare_img() , return_tensors='pt' ) __a : Dict = encoding['pixel_values'] __a : str = model(_SCREAMING_SNAKE_CASE ) __a : Dict = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __lowercase : Optional[Any] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
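# The read_in_q_k_v step above exists because timm stores each attention
# layer's projections as one fused (3 * hidden, hidden) qkv matrix, while the
# HF model keeps separate query/key/value weights. A toy sketch of that split:
import torch

hidden_size = 4  # toy size for illustration
qkv_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
qkv_weight = qkv_weight.reshape(3 * hidden_size, hidden_size)

query_w = qkv_weight[:hidden_size, :]
key_w = qkv_weight[hidden_size : 2 * hidden_size, :]
value_w = qkv_weight[-hidden_size:, :]

# re-concatenating in (q, k, v) order recovers the fused matrix
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), qkv_weight)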
294
'''simple docstring''' import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __lowercase : str = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ): __a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse! __a : Optional[Any] = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } __a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ) __a : Dict = BeautifulSoup(html.text , 'html.parser' ) __a : List[str] = ''.join( re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) __a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE ) __a : List[str] = json.loads(_SCREAMING_SNAKE_CASE ) __a : List[Any] = re.findall( r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , ) if not matched_google_image_data: return 0 __a : Tuple = re.sub( r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , ) __a : Optional[Any] = re.findall( r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , ) for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ): if index >= max_images: return index __a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Dict = urllib.request.build_opener() __a : Union[str, Any] = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(_SCREAMING_SNAKE_CASE ) __a : List[Any] = F"""query_{query.replace(" " , "_" )}""" if not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) urllib.request.urlretrieve( # noqa: S310 _SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: __lowercase : Optional[int] = download_images_from_google_query(sys.argv[1]) print(f'''{image_count} images were downloaded to disk.''') except IndexError: print('Please provide a search term.') raise
294
1
'''simple docstring'''
import jax.numpy as jnp

from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = 'T5Config'


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right, filling the first position with the decoder start id."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace possible -100 values in labels by pad_token_id
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMTaModel(FlaxTaModel):
    model_type = 'mt5'
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = 'mt5'
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = 'mt5'
    config_class = MTaConfig
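# Toy illustration of the right shift above (assuming a decoder start id of 0
# and a pad id of 0, as in T5-style models): labels move one position right
# and any -100 "ignore" slots become pad tokens.
import jax.numpy as jnp

labels = jnp.array([[5, 6, -100, 7]])
shifted = jnp.zeros_like(labels).at[:, 1:].set(labels[:, :-1])
shifted = shifted.at[:, 0].set(0)          # decoder start token
shifted = jnp.where(shifted == -100, 0, shifted)  # pad over ignored slots
print(shifted)  # [[0 5 6 0]]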
294
'''simple docstring'''
import os


def solution():
    """Project Euler 22: total of the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
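# Worked example of the scoring rule above, using Project Euler's own sample:
# "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at sorted
# position 938 it contributes 938 * 53 = 49714 to the total.
name = "COLIN"
alphabetical_value = sum(ord(letter) - 64 for letter in name)
assert alphabetical_value == 53
assert 938 * alphabetical_value == 49714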
294
1
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = 'ylacombe/bark-small' __a : List[str] = tempfile.mkdtemp() __a : Optional[Any] = 'en_speaker_1' __a : Dict = 'This is a test string' __a : Optional[Any] = 'speaker_embeddings_path.json' __a : Union[str, Any] = 'speaker_embeddings' def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.get_tokenizer() __a : Any = BarkProcessor(tokenizer=__a ) processor.save_pretrained(self.tmpdirname ) __a : Any = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) __a : Tuple = 35 __a : Optional[int] = 2 __a : int = 8 __a : List[Any] = { 'semantic_prompt': np.ones(__a ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset __a : str = processor(text=self.input_string , voice_preset=__a ) __a : int = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() ) # test loading voice preset from npz file __a : List[Any] = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(__a , **__a ) __a : Tuple = processor(text=self.input_string , voice_preset=__a ) __a : List[str] = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() ) # test loading voice preset from the hub __a : Dict = processor(text=self.input_string , voice_preset=self.voice_preset ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : Optional[int] = BarkProcessor(tokenizer=__a ) __a : int = processor(text=self.input_string ) __a : List[str] = tokenizer( self.input_string , padding='max_length' , max_length=256 , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
294
'''simple docstring'''

edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start, visited, sort):
    """Depth-first topological sort of the global `edges` graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
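# Usage sketch (relies on the definitions above): because each vertex is
# appended only after all of its neighbours, the result lists dependencies
# before dependants, i.e. a reverse topological order of the graph.
result = topological_sort('a', [], [])
assert result == ['c', 'd', 'e', 'b', 'a']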
294
1
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 72: count the reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f'''{solution() = }''')
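# Small independent sanity check of the sieve above: the sum of phi(n) for
# 2 <= n <= 8 counts the reduced proper fractions with denominator at most 8,
# of which there are 21.
from math import gcd

count = sum(1 for d in range(2, 9) for n in range(1, d) if gcd(n, d) == 1)
assert count == 21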
294
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if `number` is even, judged by its last binary digit."""
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
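# Why the bitmask works: the least-significant binary digit of an integer is
# 1 exactly for odd values, so `number & 1` isolates parity without a modulo.
assert (10 & 1) == 0  # 0b1010 -> even
assert (7 & 1) == 1   # 0b111  -> odd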
294
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowercase : Optional[Any] = False @skip_mps class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = StableDiffusionAttendAndExcitePipeline A_ = False A_ = TEXT_TO_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} ) A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) __a : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) __a : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __a : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __a : List[Any] = CLIPTextModel(__a ) __a : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __a : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' if str(__a ).startswith('mps' ): __a : List[Any] = torch.manual_seed(__a ) else: __a : Tuple = torch.Generator(device=__a ).manual_seed(__a ) __a : str = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = 'cpu' __a : List[Any] = self.get_dummy_components() __a : List[Any] = self.pipeline_class(**__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __a : str = self.get_dummy_inputs(__a ) __a : str = pipe(**__a ).images __a : Dict = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) __a : int = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 
0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) __a : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1E-3 ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def __UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = torch.manual_seed(51 ) __a : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to('cuda' ) __a : Any = 'a painting of an elephant with glasses' __a : str = [5, 7] __a : Any = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] __a : Tuple = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, 
module_spec=__spec__)
294
1
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): __a : Tuple = [] for line in lines: __a : Dict = re.sub(r'#.*' , '' , _SCREAMING_SNAKE_CASE ) # remove comments if line: filtered_lines.append(_SCREAMING_SNAKE_CASE ) __a : List[Any] = '\n'.join(_SCREAMING_SNAKE_CASE ) # Make a hash from all this code __a : List[str] = full_str.encode('utf-8' ) return shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() # get importable module names and hash for caching __lowercase : int = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions __lowercase : str = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) __lowercase : Union[str, Any] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name __lowercase : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
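# Sketch of how an extension table like _EXTENSION_TO_MODULE above can drive
# builder selection for a data file. This is a simplified illustration of the
# idea, not the datasets library's actual resolution logic.
import os

extension_to_module = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".jsonl": ("json", {}),
}


def infer_module(path):
    # look up the lower-cased extension; None means no known builder
    ext = os.path.splitext(path)[1].lower()
    return extension_to_module.get(ext)


print(infer_module("train.tsv"))  # ('csv', {'sep': '\t'})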
294
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) 
self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
294
1
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example __lowercase : Union[str, Any] = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] ): __a : Dict = [] for i in range(len(_SCREAMING_SNAKE_CASE ) ): __a : List[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours __a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_SCREAMING_SNAKE_CASE ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_SCREAMING_SNAKE_CASE ) - 1: neighbour_count += cells[i + 1][j] if i < len(_SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. __a : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_SCREAMING_SNAKE_CASE ) return next_generation def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int ): __a : Any = [] for _ in range(_SCREAMING_SNAKE_CASE ): # Create output image __a : Optional[Any] = Image.new('RGB' , (len(cells[0] ), len(_SCREAMING_SNAKE_CASE )) ) __a : Any = img.load() # Save cells to image for x in range(len(_SCREAMING_SNAKE_CASE ) ): for y in range(len(cells[0] ) ): __a : Any = 255 - cells[y][x] * 255 __a : Optional[Any] = (colour, colour, colour) # Save image images.append(_SCREAMING_SNAKE_CASE ) __a : str = new_generation(_SCREAMING_SNAKE_CASE ) return images if __name__ == "__main__": __lowercase : Optional[Any] = generate_images(GLIDER, 16) images[0].save('out.gif', save_all=True, append_images=images[1:])
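# Worked example of one step of the rules implemented above, re-checked with
# an independent mini-implementation (not the file's own new_generation): the
# vertical blinker flips to a horizontal one, because only the centre cell
# keeps 2 live neighbours while the two dead cells beside it each see 3.
blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def live_neighbours(cells, i, j):
    return sum(
        cells[x][y]
        for x in range(max(i - 1, 0), min(i + 2, len(cells)))
        for y in range(max(j - 1, 0), min(j + 2, len(cells[0])))
        if (x, y) != (i, j)
    )


def step_cell(cells, i, j):
    n = live_neighbours(cells, i, j)
    alive = cells[i][j] == 1
    return 1 if (alive and 2 <= n <= 3) or (not alive and n == 3) else 0


next_gen = [[step_cell(blinker, i, j) for j in range(3)] for i in range(3)]
assert next_gen == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]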
294
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : int = parent __a : Union[str, Any] = batch_size __a : Optional[int] = seq_length __a : List[str] = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : List[str] = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : str = num_attention_heads __a : Optional[int] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Dict = type_vocab_size __a : Any = type_sequence_label_size __a : Dict = initializer_range __a : Optional[Any] = num_labels __a : Optional[Any] = num_choices __a : Union[str, Any] = relative_attention __a : List[str] = position_biased_input __a : List[Any] = pos_att_type __a : Tuple = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : Any = None if self.use_token_type_ids: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : int = None __a : Dict = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def 
'''simple docstring''' import math import sys def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): if number != int(_SCREAMING_SNAKE_CASE ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 __a : Optional[int] = [-1] * (number + 1) __a : Union[str, Any] = 0 for i in range(1 , number + 1 ): __a : Optional[int] = sys.maxsize __a : Optional[int] = int(math.sqrt(_SCREAMING_SNAKE_CASE ) ) for j in range(1 , root + 1 ): __a : List[Any] = 1 + answers[i - (j**2)] __a : Optional[int] = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : List[Any] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
294
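# A readable rendering of the perfect-squares snippet above, with the obfuscated
# `__a`/`_SCREAMING_SNAKE_CASE` bindings mapped back to presumed distinct names.
# It computes the minimum count of perfect squares summing to `number` via DP
# (Lagrange's four-square theorem guarantees the answer is at most 4).
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            answer = min(answer, 1 + answers[i - (j ** 2)])
        answers[i] = answer
    return answers[number]


assert minimum_squares_to_represent_a_number(12) == 3  # 12 = 4 + 4 + 4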
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ): return False return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ): __a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[Any] = model __a : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Union[str, Any] = model.module if not keep_fpaa_wrapper: __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' ) __a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ): __a : Any = forward.__wrapped__ if forward == original_forward: break __a : str = forward if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ): convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[str] = model __a : Optional[int] = compiled_model return model def lowerCamelCase (): PartialState().wait_for_everyone() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): if PartialState().distributed_type == DistributedType.TPU: xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @contextmanager def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ): for key, value in kwargs.items(): __a : Optional[int] = str(_SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ): return obj.__qualname__ if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): return obj.__name__ return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): for key, value in source.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} ) merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a : Tuple = value return destination def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ): if port is None: __a : List[str] = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
294
1
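# A minimal usage sketch for the recursive dict-merging helper defined in the
# accelerate utilities above (reimplemented under a hypothetical name so the
# example is self-contained): nested keys merge recursively, scalars from
# `source` overwrite `destination`.
def merge_dicts(source: dict, destination: dict) -> dict:
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})  # get or create the nested dict
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


base = {'optim': {'lr': 1e-4, 'betas': (0.9, 0.999)}, 'seed': 0}
override = {'optim': {'lr': 3e-4}}
assert merge_dicts(override, base) == {'optim': {'lr': 3e-4, 'betas': (0.9, 0.999)}, 'seed': 0}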
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase : str = logging.get_logger(__name__) __lowercase : Dict = { 'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json', # See all GLPN models at https://huggingface.co/models?filter=glpn } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "glpn" def __init__( self , __a=3 , __a=4 , __a=[2, 2, 2, 2] , __a=[8, 4, 2, 1] , __a=[32, 64, 160, 256] , __a=[7, 3, 3, 3] , __a=[4, 2, 2, 2] , __a=[1, 2, 5, 8] , __a=[4, 4, 4, 4] , __a="gelu" , __a=0.0 , __a=0.0 , __a=0.02 , __a=0.1 , __a=1E-6 , __a=64 , __a=10 , __a=-1 , **__a , ): '''simple docstring''' super().__init__(**__a ) __a : Optional[int] = num_channels __a : Optional[int] = num_encoder_blocks __a : Tuple = depths __a : Union[str, Any] = sr_ratios __a : Tuple = hidden_sizes __a : str = patch_sizes __a : Dict = strides __a : Any = mlp_ratios __a : str = num_attention_heads __a : str = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : List[Any] = initializer_range __a : Any = drop_path_rate __a : int = layer_norm_eps __a : int = decoder_hidden_size __a : List[str] = max_depth __a : int = head_in_index
294
'''simple docstring''' from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
294
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowercase : Optional[int] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Union[str, Any] = [ 'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys __lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
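# A stripped-down sketch of the lazy-import idea the i-BERT module above relies
# on: exported names are only imported from their submodule on first attribute
# access. This illustrates the pattern; it is not transformers' actual
# `_LazyModule` implementation.
import importlib


class LazyNamespace:
    def __init__(self, import_structure: dict):
        # Map each exported name to the module that defines it.
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        module = importlib.import_module(module_name)  # deferred until first use
        return getattr(module, name)


ns = LazyNamespace({'math': ['sqrt']})
assert ns.sqrt(9) == 3.0  # `import math` happens here, not at construction time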
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class __UpperCamelCase : A_ = 42 A_ = None A_ = None def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ): # Validation def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool: if node is None: return True if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(_SCREAMING_SNAKE_CASE ): raise ValueError( 'Each node should be type of TreeNode and data should be float.' ) def is_binary_search_tree_recursive_check( _SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , _SCREAMING_SNAKE_CASE ) ) return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) ) if __name__ == "__main__": import doctest doctest.testmod()
294
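# A quick usage sketch for the BST validator above, with the dataclass fields
# restored to their presumed names (`data`, `left`, `right`); the obfuscated
# `A_ = 42` lines stand in for these field declarations.
from dataclasses import dataclass
from typing import Optional


@dataclass
class TreeNode:
    data: float
    left: Optional['TreeNode'] = None
    right: Optional['TreeNode'] = None


valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))  # children on the wrong sides
# The validator returns True for `valid` and False for `invalid`, since every
# node must satisfy left_bound < node.data < right_bound along each path.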
1
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
294
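# The parity test above relies only on the lowest bit: even integers end in a
# binary 0. A few spot checks (this also holds for negative numbers in Python,
# whose `&` behaves as if integers were two's complement):
for n, expected in [(0, True), (7, False), (10, True), (-4, True), (-3, False)]:
    assert (n & 1 == 0) is expected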
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (terminalreporter : List[str] ): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(terminalreporter , id=make_reports )
294
1
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( lowerCAmelCase_ ): A_ = (DDPMParallelScheduler,) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' __a : List[Any] = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**__a ) return config def __UpperCAmelCase ( self ): '''simple docstring''' for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' self.check_over_configs(thresholding=__a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , ) def __UpperCAmelCase ( self ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.scheduler_classes[0] __a : Any = self.get_scheduler_config() __a : List[str] = scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.scheduler_classes[0] __a : Union[str, Any] = self.get_scheduler_config() __a : List[Any] = scheduler_class(**__a ) __a : Optional[int] = len(__a ) __a : Optional[int] = self.dummy_model() __a : Any = self.dummy_sample_deter __a : Optional[Any] = self.dummy_sample_deter + 0.1 __a : Optional[int] = self.dummy_sample_deter - 0.1 __a : Tuple = samplea.shape[0] __a : str = torch.stack([samplea, samplea, samplea] , dim=0 ) __a : Optional[Any] = torch.arange(__a )[0:3, None].repeat(1 , __a ) __a : str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __a : str = scheduler.batch_step_no_noise(__a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) __a : Dict = torch.sum(torch.abs(__a ) ) __a : Optional[Any] = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.scheduler_classes[0] __a : Tuple = self.get_scheduler_config() __a : str = scheduler_class(**__a ) __a : Optional[Any] = len(__a ) __a : Union[str, Any] = self.dummy_model() __a : Dict = self.dummy_sample_deter __a : List[Any] = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. 
predict noise residual __a : int = model(__a , __a ) # 2. predict previous mean of sample x_t-1 __a : Union[str, Any] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample __a : str = pred_prev_sample __a : Optional[int] = torch.sum(torch.abs(__a ) ) __a : str = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.scheduler_classes[0] __a : Optional[Any] = self.get_scheduler_config(prediction_type='v_prediction' ) __a : Optional[Any] = scheduler_class(**__a ) __a : List[str] = len(__a ) __a : Tuple = self.dummy_model() __a : Any = self.dummy_sample_deter __a : List[Any] = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual __a : Optional[Any] = model(__a , __a ) # 2. predict previous mean of sample x_t-1 __a : str = scheduler.step(__a , __a , __a , generator=__a ).prev_sample __a : Optional[int] = pred_prev_sample __a : Any = torch.sum(torch.abs(__a ) ) __a : Dict = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.scheduler_classes[0] __a : List[Any] = self.get_scheduler_config() __a : str = scheduler_class(**__a ) __a : List[str] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__a ) __a : str = scheduler.timesteps for i, timestep in enumerate(__a ): if i == len(__a ) - 1: __a : List[Any] = -1 else: __a : Optional[int] = timesteps[i + 1] __a : str = scheduler.previous_timestep(__a ) __a : List[Any] = prev_t.item() self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.scheduler_classes[0] __a : str = self.get_scheduler_config() __a : Union[str, Any] = scheduler_class(**__a ) __a : Union[str, Any] = [100, 87, 50, 51, 0] with self.assertRaises(__a , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.scheduler_classes[0] __a : Optional[Any] = self.get_scheduler_config() __a : Dict = scheduler_class(**__a ) __a : List[str] = [100, 87, 50, 1, 0] __a : List[str] = len(__a ) with self.assertRaises(__a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.scheduler_classes[0] __a : int = self.get_scheduler_config() __a : Union[str, Any] = scheduler_class(**__a ) __a : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( __a , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=__a )
294
'''simple docstring''' import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : str = re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE ) # remove pegasus newline char; re.sub returns a new string, so keep the result assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
294
1
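# Expected behaviour of the sentence splitter above (presumably named
# `add_newline_to_end_of_each_sentence` upstream), assuming nltk's punkt model
# is installed: each detected sentence ends up on its own line, e.g.
#   "Hello there. General Kenobi!"  ->  "Hello there.\nGeneral Kenobi!"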
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __lowercase : List[str] = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = BartphoTokenizer A_ = False A_ = True def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() __a : Dict = ['▁This', '▁is', '▁a', '▁t', 'est'] __a : List[Any] = dict(zip(__a , range(len(__a ) ) ) ) __a : Union[str, Any] = {'unk_token': '<unk>'} __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] ) with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp: for token in vocab_tokens: fp.write(f"""{token} {vocab_tokens[token]}\n""" ) __a : Union[str, Any] = BartphoTokenizer(__a , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = 'This is a là test' __a : Dict = 'This is a<unk><unk> test' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = BartphoTokenizer(__a , self.monolingual_vocab_file , **self.special_tokens_map ) __a : int = 'This is a là test' __a : Optional[Any] = '▁This ▁is ▁a ▁l à ▁t est'.split() __a : Any = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __a : Union[str, Any] = tokens + [tokenizer.unk_token] __a : List[str] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
294
'''simple docstring''' import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace() X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). abca = [0, 25, 50] abcb = [25, 50, 75] young = fuzz.membership.trimf(X, abca) middle_aged = fuzz.membership.trimf(X, abcb) # Compute the different operations using inbuilt functions. one = np.ones(75) zero = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) union = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1 - µA(x)) complement_a = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x), (1 - µB(x))) difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))] alg_sum = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) alg_product = young * middle_aged # 7. Bounded Sum = min[1, (µA(x) + µB(x))] bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = max[0, (µA(x) - µB(x))] bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
294
1
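# The fuzzy union/intersection used above are elementwise max/min over the
# membership grades; a dependency-free spot check of that definition:
young_mu = [1.0, 0.5, 0.0]
middle_mu = [0.0, 0.5, 1.0]
assert [max(a, b) for a, b in zip(young_mu, middle_mu)] == [1.0, 0.5, 1.0]  # union
assert [min(a, b) for a, b in zip(young_mu, middle_mu)] == [0.0, 0.5, 0.0]  # intersection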
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : int = parent __a : Union[str, Any] = batch_size __a : Optional[int] = seq_length __a : List[str] = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : List[str] = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : str = num_attention_heads __a : Optional[int] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Dict = type_vocab_size __a : Any = type_sequence_label_size __a : Dict = initializer_range __a : Optional[Any] = num_labels __a : Optional[Any] = num_choices __a : Union[str, Any] = relative_attention __a : List[str] = position_biased_input __a : List[Any] = pos_att_type __a : Tuple = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : Any = None if self.use_token_type_ids: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : int = None __a : Dict = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def 
__UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0] __a : str = model(__a , token_type_ids=__a )[0] __a : Optional[int] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : int = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[Any] = self.num_labels __a : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[str] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __a : str = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : int = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = config_and_inputs __a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, 
"token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = DebertaVaModelTester(self ) __a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : str = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @unittest.skip(reason='Model not available yet' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __a : int = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __a : str = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
294
'''simple docstring''' import sys __lowercase : Union[str, Any] = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[str] = 1 for digit in s: product *= int(_SCREAMING_SNAKE_CASE ) return product def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ): __a : Optional[int] = -sys.maxsize - 1 __a : Optional[Any] = n[:13] __a : int = 13 while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13: if int(n[cur_index] ) >= int(substr[0] ): __a : List[Any] = substr[1:] + n[cur_index] cur_index += 1 else: __a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) ) __a : Optional[Any] = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
294
1
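# A brute-force cross-check for the sliding-window solution above: take the
# maximum digit product over every window of 13 consecutive digits. On the
# 1000-digit input that is under a thousand windows, so it runs instantly.
from math import prod


def largest_product_brute_force(digits: str, window: int = 13) -> int:
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )


assert largest_product_brute_force('3675356291', window=4) == 630  # window "3675"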
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or number < 0: raise ValueError('Input must be a non-negative integer' ) __a : List[Any] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
294
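# Why `number &= number - 1` counts set bits: subtracting 1 flips the lowest
# set bit to 0 and sets every bit below it, so the AND clears exactly one set
# bit per iteration. Tracing 0b1011000 (three set bits, three iterations):
n, steps = 0b1011000, []
while n:
    n &= n - 1
    steps.append(bin(n))
assert steps == ['0b1010000', '0b1000000', '0b0']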
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = CodeGenTokenizer A_ = CodeGenTokenizerFast A_ = True A_ = {"add_prefix_space": True} A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __a : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) ) __a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : Dict = {'unk_token': '<unk>'} __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__a ) ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = 'lower newer' __a : Tuple = 'lower newer' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __a : str = 'lower newer' __a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) __a : List[str] = tokens + [tokenizer.unk_token] __a : Any = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return __a : List[Any] = self.get_tokenizer() __a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Any = 'lower newer' # Testing tokenization __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) __a : Dict = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens __a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) __a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with special tokens __a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a ) __a : int = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token __a : Any = tokens + [rust_tokenizer.unk_token] __a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' pass def __UpperCAmelCase ( self 
, __a=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input __a : List[Any] = 'This is a simple input' __a : Tuple = ['This is a simple input 1', 'This is a simple input 2'] __a : Tuple = ('This is a simple input', 'This is a pair') __a : str = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __a : str = 'This is a simple input' __a : Any = ['This is a simple input looooooooong', 'This is a simple input'] __a : Optional[int] = ('This is a simple input', 'This is a pair') __a : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __a : int = tokenizer.pad_token_id __a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' ) __a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) __a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' ) __a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = '$$$' __a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) __a : Union[str, Any] = 'This is a simple input' __a : List[Any] = ['This is a 
simple input 1', 'This is a simple input 2'] __a : List[Any] = tokenizer.bos_token_id __a : List[str] = tokenizer(__a ) __a : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __a : Any = tokenizer.decode(out_s.input_ids ) __a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) __a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' __a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b' __a : Optional[int] = tokenizer.encode(__a ) __a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] __a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a ) self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass
294
1
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: __lowercase : Optional[Any] = None try: import msvcrt except ImportError: __lowercase : int = None try: import fcntl except ImportError: __lowercase : Optional[int] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __lowercase : int = OSError # Data # ------------------------------------------------ __lowercase : Any = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] __lowercase : Optional[Any] = '3.0.12' __lowercase : Any = None def lowerCamelCase (): global _logger __a : List[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' __a : Tuple = lock_file return None def __str__( self ): '''simple docstring''' __a : int = f"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __UpperCamelCase : def __init__( self , __a ): '''simple docstring''' __a : Dict = lock return None def __enter__( self ): '''simple docstring''' return self.lock def __exit__( self , __a , __a , __a ): '''simple docstring''' self.lock.release() return None class __UpperCamelCase : def __init__( self , __a , __a=-1 , __a=None ): '''simple docstring''' __a : int = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __a : str = self.hash_filename_if_too_long(__a , __a ) # The path to the lock file. __a : Dict = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __a : List[str] = None # The default timeout value. __a : Union[str, Any] = timeout # We use this lock primarily for the lock counter. __a : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __a : Union[str, Any] = 0 return None @property def __UpperCAmelCase ( self ): '''simple docstring''' return self._lock_file @property def __UpperCAmelCase ( self ): '''simple docstring''' return self._timeout @timeout.setter def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : int = float(__a ) return None def __UpperCAmelCase ( self ): '''simple docstring''' raise NotImplementedError() def __UpperCAmelCase ( self ): '''simple docstring''' raise NotImplementedError() @property def __UpperCAmelCase ( self ): '''simple docstring''' return self._lock_file_fd is not None def __UpperCAmelCase ( self , __a=None , __a=0.05 ): '''simple docstring''' if timeout is None: __a : Dict = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __a : List[str] = id(self ) __a : Optional[Any] = self._lock_file __a : List[str] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__a ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __a : Union[str, Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __UpperCAmelCase ( self , __a=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __a : List[str] = id(self ) __a : Tuple = self._lock_file logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() __a : Any = 0 logger().debug(f"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self ): '''simple docstring''' self.acquire() return self def __exit__( self , __a , __a , __a ): '''simple docstring''' self.release() return None def __del__( self ): '''simple docstring''' self.release(force=__a ) return None def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' __a : str = os.path.basename(__a ) if len(__a ) > max_length and max_length > 0: __a : Tuple = os.path.dirname(__a ) __a : Tuple = str(hash(__a ) ) __a : Tuple = filename[: max_length - len(__a ) - 8] + '...' + hashed_filename + '.lock' return os.path.join(__a , __a ) else: return path class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=-1 , __a=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__a , timeout=__a , max_filename_length=__a ) __a : List[Any] = '\\\\?\\' + relative_to_absolute_path(self.lock_file ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __a : Dict = os.open(self._lock_file , __a ) except OSError: pass else: try: msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__a ) else: __a : Tuple = fd return None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self._lock_file_fd __a : List[str] = None msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 ) os.close(__a ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=-1 , __a=None ): '''simple docstring''' __a : Optional[int] = os.statvfs(os.path.dirname(__a ) ).f_namemax super().__init__(__a , timeout=__a , max_filename_length=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC __a : Union[str, Any] = os.open(self._lock_file , __a ) try: fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__a ) else: __a : Tuple = fd return None def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self._lock_file_fd __a : Union[str, Any] = None fcntl.flock(__a , fcntl.LOCK_UN ) os.close(__a ) return None class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __a : List[str] = os.open(self._lock_file , __a ) except OSError: pass else: __a : Union[str, Any] = fd return None def __UpperCAmelCase ( self ): '''simple docstring''' os.close(self._lock_file_fd ) __a : Tuple = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __lowercase : int = None if msvcrt: __lowercase : Optional[int] = WindowsFileLock elif fcntl: __lowercase : Dict = UnixFileLock else: __lowercase : str = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
294
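# A minimal usage sketch for the lock classes above (the same API as the
# standalone `filelock` package this module vendors, version 3.0.12):
from filelock import FileLock, Timeout

lock = FileLock('model.bin.lock', timeout=1)  # timeout=-1 (default) blocks forever
try:
    with lock:  # re-entrant context manager; acquires on enter, releases on exit
        pass    # critical section: only one process at a time gets here
except Timeout:
    print('another process is holding model.bin.lock')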
'''simple docstring''' def lowerCamelCase (number : int , iterations : int ): if not isinstance(iterations , int ): raise ValueError('iterations must be defined as integers' ) if not isinstance(number , int ) or not number >= 1: raise ValueError( 'starting number must be an integer and be more than 0' ) if not iterations >= 1: raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' ) out = '' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(number ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
294
1
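# Expected behaviour of the FizzBuzz helper above: multiples of 3/5 become
# "Fizz"/"Buzz" (both for multiples of 15) and every token gets a trailing
# space, e.g. for 15 iterations starting at 1:
#   "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "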
'''simple docstring''' from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def lowerCamelCase (main_process_only : bool = True , *args : List[Any] , **kwargs : str ): if not is_tqdm_available(): raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' ) __a : Any = False if main_process_only: __a : Union[str, Any] = PartialState().local_process_index != 0 return _tqdm(*args , **kwargs , disable=__a )
294
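# In spirit, the wrapper above disables the progress bar on every rank except
# local rank 0 so multi-GPU runs show a single bar. A hedged sketch of the same
# idea without the accelerate dependency (reads the usual LOCAL_RANK variable):
import os

from tqdm.auto import tqdm


def main_process_tqdm(iterable, main_process_only: bool = True, **kwargs):
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    disable = main_process_only and local_rank != 0
    return tqdm(iterable, disable=disable, **kwargs)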
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {'height': 18, 'width': 18} __a : int = parent __a : Dict = batch_size __a : Optional[int] = num_channels __a : List[Any] = image_size __a : Tuple = min_resolution __a : str = max_resolution __a : str = do_resize __a : Optional[Any] = size __a : str = apply_ocr def __UpperCAmelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = LayoutLMvaImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , 'do_resize' ) ) self.assertTrue(hasattr(__a , 'size' ) ) self.assertTrue(hasattr(__a , 'apply_ocr' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) __a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __a ) self.assertIsInstance(encoding.boxes , __a ) # Test batched __a : Any = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __a : Optional[Any] = 
image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset __a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) __a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' ) __a : Optional[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 
'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 __a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 
726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a ) self.assertListEqual(encoding.boxes , __a ) # with apply_OCR = False __a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a ) __a : List[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
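A minimal usage sketch for the image processor exercised in the tests above (hedged: class and attribute names follow this dump's renaming conventions, pytesseract must be installed for the OCR path, and the blank placeholder stands in for a real document scan):

from PIL import Image
from transformers import LayoutLMvaImageProcessor

processor = LayoutLMvaImageProcessor()            # apply_ocr defaults to True
image = Image.new('RGB', (400, 300), 'white')     # placeholder document image
encoding = processor(image, return_tensors='pt')
print(encoding.pixel_values.shape)                # torch.Size([1, 3, 224, 224])
print(len(encoding.words), len(encoding.boxes))   # one OCR result list per image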
'''simple docstring''' from math import factorial __lowercase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('Parameter number must be int' ) if number < 0: raise ValueError('Parameter number must be greater than or equal to 0' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_SCREAMING_SNAKE_CASE ) ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 60 , _SCREAMING_SNAKE_CASE : int = 1_000_000 ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('Parameters chain_length and number_limit must be int' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( 'Parameters chain_length and number_limit must be greater than 0' ) # the counter for the chains with the exact desired length __a : Optional[Any] = 0 # the cached sizes of the previous chains __a : dict[int, int] = {} for start_chain_element in range(1 , _SCREAMING_SNAKE_CASE ): # The temporary set will contain the elements of the chain __a : Union[str, Any] = set() __a : Dict = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __a : Any = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_SCREAMING_SNAKE_CASE ) chain_set_length += 1 __a : int = digit_factorial_sum(_SCREAMING_SNAKE_CASE ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __a : str = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f'''{solution()}''')
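A hand-checked instance of the chain arithmetic above (illustrative, not part of the source row): 145 is a fixed point of the digit-factorial sum, and 69 opens the classic five-term chain.

from math import factorial

assert sum(factorial(int(d)) for d in '145') == 1 + 24 + 120 == 145      # fixed point
assert sum(factorial(int(d)) for d in '69') == 720 + 362_880 == 363_600
# 69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 ... : five distinct terms before the repeat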
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __lowercase : List[Any] = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "ernie_m" A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , **__a ) __a : int = vocab_size __a : Dict = hidden_size __a : str = num_hidden_layers __a : Dict = num_attention_heads __a : List[str] = intermediate_size __a : Union[str, Any] = hidden_act __a : List[Any] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Any = max_position_embeddings __a : int = initializer_range __a : Dict = layer_norm_eps __a : int = classifier_dropout __a : Dict = is_decoder __a : int = act_dropout
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __lowercase : Tuple = 'src/transformers' __lowercase : Union[str, Any] = 'docs/source/en/tasks' def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __a : Optional[Any] = f.readlines() # Find the start prompt. __a : Tuple = 0 while not lines[start_index].startswith(_SCREAMING_SNAKE_CASE ): start_index += 1 start_index += 1 __a : Union[str, Any] = start_index while not lines[end_index].startswith(_SCREAMING_SNAKE_CASE ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __lowercase : List[str] = direct_transformers_import(TRANSFORMERS_PATH) __lowercase : Any = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__lowercase : Union[str, Any] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ): __a : Any = TASK_GUIDE_TO_MODELS[task_guide] __a : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_SCREAMING_SNAKE_CASE , set() ) __a : Optional[int] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int=False ): __a , __a , __a , __a : List[Any] = _find_text_in_file( filename=os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) __a : List[Any] = get_model_list_for_task(_SCREAMING_SNAKE_CASE ) if current_list != new_list: if overwrite: with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ' to fix this.' ) if __name__ == "__main__": __lowercase : str = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __lowercase : Optional[int] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() __a : int = module __a : List[Any] = nn.Sequential( nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , ) __a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__a ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' return self.module(__a , *__a , **__a ) + self.adapter(__a ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module A_ = "bigscience/bloom-1b7" # Constant values A_ = 2.109659552692574 A_ = "Hello my name is" A_ = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) A_ = 10 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = AutoTokenizer.from_pretrained(self.model_name ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_abit.config self.assertTrue(hasattr(__a , 'quantization_config' ) ) __a : Union[str, Any] = config.to_dict() __a : Tuple = config.to_diff_dict() __a : Tuple = config.to_json_string() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __a : List[Any] = self.model_fpaa.get_memory_footprint() __a : List[Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __a : Tuple = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__a , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BitsAndBytesConfig() __a : Tuple = True __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ) __a : List[Any] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BitsAndBytesConfig() with self.assertRaises(__a ): __a : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(__a ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__a ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with 
self.assertRaises(__a ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__a ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Optional[int] = self.model_fpaa.to(torch.floataa ) __a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __a : List[Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.half() # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.float() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : Any = 't5-small' __a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __a : int = AutoTokenizer.from_pretrained(cls.model_name ) __a : Union[str, Any] = 'Translate in German: Hello, my dog is cute' def __UpperCAmelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules __a : List[str] = None # test with `t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : Any = model.generate(**__a ) # test with `flan-t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[Any] = model.generate(**__a ) __a : Optional[int] = modules def __UpperCAmelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[str] = model.generate(**__a ) # test with `flan-t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : int = model.generate(**__a ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # model_name __a : List[Any] = 'bigscience/bloom-560m' __a : Union[str, Any] = 't5-small' # Different types of model __a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Sequence classification model __a : Dict = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__a , device_map='auto' ) # CausalLM model 
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Seq2seq model __a : Any = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __a : str = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__a , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 'facebook/opt-350m' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __a : Tuple = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __a : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__a ) ): __a : str = LoRALayer(module.q_proj , rank=16 ) __a : str = LoRALayer(module.k_proj , rank=16 ) __a : Optional[int] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __a : int = model.forward(**__a ) out.logits.norm().backward() for module in model.modules(): if isinstance(__a , __a ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__a , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "gpt2-xl" A_ = 3.3191854854152187
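For orientation, a minimal 4-bit loading sketch matching the pattern these tests exercise (hedged: `load_in_4bit` is the real-world spelling of the renamed `load_in_abit` flag, and appears as-is in the pipeline kwargs above; a CUDA device, accelerate and bitsandbytes are assumed):

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained('bigscience/bloom-1b7')
model = AutoModelForCausalLM.from_pretrained(
    'bigscience/bloom-1b7', load_in_4bit=True, device_map='auto'
)
inputs = tok('Hello my name is', return_tensors='pt').to(0)
out = model.generate(**inputs, max_new_tokens=10)
print(tok.decode(out[0], skip_special_tokens=True))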
'''simple docstring''' import baseaa def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): return baseaa.aaaencode(_SCREAMING_SNAKE_CASE.encode('utf-8' ) ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : bytes ): return baseaa.aaadecode(_SCREAMING_SNAKE_CASE ).decode('utf-8' ) if __name__ == "__main__": import doctest doctest.testmod()
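Round-trip sketch of the intended behavior, written against the stdlib names (assumption: `baseaa` and its `aaaencode`/`aaadecode` are this dump's digit-to-letter renamings of `base64.a85encode`/`a85decode`):

import base64

encoded = base64.a85encode('Hello World!'.encode('utf-8'))
assert base64.a85decode(encoded).decode('utf-8') == 'Hello World!'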
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __UpperCamelCase ( lowerCAmelCase_ ): A_ = None A_ = None A_ = None A_ = None class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) __a : Any = project_dim __a : Optional[Any] = pooler_fn __a : int = learn_encoder __a : str = use_attention_mask class __UpperCamelCase ( lowerCAmelCase_ ): A_ = [r"pooler", r"logit_scale"] A_ = [r"position_ids", r"predictions.decoder.bias"] A_ = "roberta" A_ = RobertaSeriesConfig def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Optional[Any] = XLMRobertaModel(__a ) __a : str = nn.Linear(config.hidden_size , config.project_dim ) __a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a ) if self.has_pre_transformation: __a : int = nn.Linear(config.hidden_size , config.project_dim ) __a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Tuple = self.base_model( input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , ) if self.has_pre_transformation: __a : Optional[Any] = outputs['hidden_states'][-2] __a : Optional[int] = self.pre_LN(__a ) __a : Union[str, Any] = self.transformation_pre(__a ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __a : Optional[Any] = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
'''simple docstring''' from collections.abc import Iterable from typing import Generic, TypeVar __lowercase : str = TypeVar('_T') class __UpperCamelCase ( Generic[_T] ): def __init__( self , __a = None ): '''simple docstring''' __a : list[_T] = list(iterable or [] ) __a : list[_T] = [] def __len__( self ): '''simple docstring''' return len(self._stacka ) + len(self._stacka ) def __repr__( self ): '''simple docstring''' return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def __UpperCAmelCase ( self , __a ): '''simple docstring''' self._stacka.append(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self._stacka.pop __a : int = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError('Queue is empty' ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
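Because the renamed put/get methods above collide under this dump's obfuscation (both become the same identifier), here is a de-obfuscated sketch of the same two-stack idea (all names hypothetical): pushes land on an inbox stack, pops drain it once into an outbox, so dequeues come out FIFO in amortized O(1).

class TwoStackQueue:
    def __init__(self, iterable=None):
        self._inbox = list(iterable or [])   # newest element on top
        self._outbox = []                    # oldest element on top once drained

    def put(self, item):
        self._inbox.append(item)

    def get(self):
        if not self._outbox:
            while self._inbox:               # reverse push order into pop order
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError('Queue is empty')
        return self._outbox.pop()

q = TwoStackQueue([1, 2])
q.put(3)
assert [q.get(), q.get(), q.get()] == [1, 2, 3]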
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : Union[str, Any] = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowercase : List[str] = logging.get_logger(__name__) __lowercase : Tuple = {'tokenizer_file': 'tokenizer.json'} __lowercase : List[str] = { 'tokenizer_file': { 'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json', 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json', }, } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = ["input_ids", "attention_mask"] A_ = None def __init__( self , __a=None , __a=None , __a=None , __a="<unk>" , __a="<s>" , __a="</s>" , __a="<pad>" , __a=False , __a=False , **__a , ): '''simple docstring''' super().__init__( __a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , add_prefix_space=__a , clean_up_tokenization_spaces=__a , **__a , ) __a : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __a ) != add_prefix_space: __a : Optional[Any] = getattr(__a , pre_tok_state.pop('type' ) ) __a : Optional[Any] = add_prefix_space __a : List[str] = pre_tok_class(**__a ) __a : List[str] = add_prefix_space def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' __a : List[Any] = kwargs.get('is_split_into_words' , __a ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" ' pretokenized inputs.' ) return super()._batch_encode_plus(*__a , **__a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' __a : str = kwargs.get('is_split_into_words' , __a ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" ' pretokenized inputs.' ) return super()._encode_plus(*__a , **__a ) def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' __a : Union[str, Any] = self._tokenizer.model.save(__a , name=__a ) return tuple(__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] ) if len(__a ) > self.model_max_length: __a : Union[str, Any] = input_ids[-self.model_max_length :] return input_ids
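How the conversation flattening in the last method above behaves (illustrative ids, not real tokenizations): each turn is encoded and terminated with `eos_token_id`, and only the trailing `model_max_length` ids are kept, so truncation discards the oldest turns first.

# turns:        ['Hi', 'Hello!', 'How are you?']
# per-turn ids: [[101], [102, 103], [104, 105, 106]]            (invented)
# flattened:    [101, EOS, 102, 103, EOS, 104, 105, 106, EOS]
# model_max_length == 5 keeps only the tail: [EOS, 104, 105, 106, EOS]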
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
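A worked instance of the TF 'SAME' padding arithmetic in the padding helper above (sizes illustrative): for a 7-pixel dimension, 3-pixel kernel and stride 2, the remainder 7 % 2 = 1 gives pad_along = max(3 - 1, 0) = 2, split 1 before and 1 after, so the convolution produces ceil(7 / 2) = 4 outputs, which is exactly TensorFlow's SAME rule.

import math

in_size, kernel, stride = 7, 3, 2
pad_along = max(kernel - (in_size % stride or stride), 0)   # == 2
out = (in_size + pad_along - kernel) // stride + 1
assert out == math.ceil(in_size / stride) == 4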
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowercase : Optional[int] = logging.get_logger(__name__) __lowercase : Any = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "ibert" def __init__( self , __a=3_0522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=1E-1_2 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=False , __a="none" , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) __a : Tuple = vocab_size __a : Any = hidden_size __a : Tuple = num_hidden_layers __a : Dict = num_attention_heads __a : Tuple = hidden_act __a : Any = intermediate_size __a : List[Any] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Any = type_vocab_size __a : Optional[Any] = initializer_range __a : Optional[int] = layer_norm_eps __a : Optional[int] = position_embedding_type __a : int = quant_mode __a : Optional[int] = force_dequant class __UpperCamelCase ( lowerCAmelCase_ ): @property def __UpperCAmelCase ( self ): '''simple docstring''' if self.task == "multiple-choice": __a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __a : str = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
'''simple docstring''' import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __lowercase : str = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ): __a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse! __a : Optional[Any] = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } __a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ) __a : Dict = BeautifulSoup(html.text , 'html.parser' ) __a : List[str] = ''.join( re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) __a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE ) __a : List[str] = json.loads(_SCREAMING_SNAKE_CASE ) __a : List[Any] = re.findall( r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , ) if not matched_google_image_data: return 0 __a : Tuple = re.sub( r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , ) __a : Optional[Any] = re.findall( r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , ) for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ): if index >= max_images: return index __a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Dict = urllib.request.build_opener() __a : Union[str, Any] = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(_SCREAMING_SNAKE_CASE ) __a : List[Any] = F"""query_{query.replace(" " , "_" )}""" if not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) urllib.request.urlretrieve( # noqa: S310 _SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: __lowercase : Optional[int] = download_images_from_google_query(sys.argv[1]) print(f'''{image_count} images were downloaded to disk.''') except IndexError: print('Please provide a search term.') raise
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase_ ): A_ = 42 class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ): @register_to_config def __init__( self , __a = 16 , __a = 88 , __a = None , __a = None , __a = 1 , __a = 0.0 , __a = 32 , __a = None , __a = False , __a = None , __a = "geglu" , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : List[Any] = num_attention_heads __a : List[str] = attention_head_dim __a : Optional[Any] = num_attention_heads * attention_head_dim __a : Dict = in_channels __a : Dict = torch.nn.GroupNorm(num_groups=__a , num_channels=__a , eps=1E-6 , affine=__a ) __a : Optional[int] = nn.Linear(__a , __a ) # 3. Define transformers blocks __a : Tuple = nn.ModuleList( [ BasicTransformerBlock( __a , __a , __a , dropout=__a , cross_attention_dim=__a , activation_fn=__a , attention_bias=__a , double_self_attention=__a , norm_elementwise_affine=__a , ) for d in range(__a ) ] ) __a : Union[str, Any] = nn.Linear(__a , __a ) def __UpperCAmelCase ( self , __a , __a=None , __a=None , __a=None , __a=1 , __a=None , __a = True , ): '''simple docstring''' __a , __a , __a , __a : Tuple = hidden_states.shape __a : int = batch_frames // num_frames __a : int = hidden_states __a : int = hidden_states[None, :].reshape(__a , __a , __a , __a , __a ) __a : int = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __a : List[Any] = self.norm(__a ) __a : Any = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __a , __a ) __a : Any = self.proj_in(__a ) # 2. Blocks for block in self.transformer_blocks: __a : Any = block( __a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , class_labels=__a , ) # 3. Output __a : Union[str, Any] = self.proj_out(__a ) __a : Any = ( hidden_states[None, None, :] .reshape(__a , __a , __a , __a , __a ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __a : Union[str, Any] = hidden_states.reshape(__a , __a , __a , __a ) __a : Optional[Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=__a )
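Shape walkthrough for the temporal transformer's forward pass above (sizes illustrative): with batch_frames = 8 and num_frames = 4, the input (8, C, H, W) is reshaped to (2, C, 4, H, W), flattened to (2·H·W, 4, inner_dim) so that self-attention runs across the 4 frames at each spatial location, then unflattened and added to the residual, restoring (8, C, H, W).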
'''simple docstring''' import os def lowerCamelCase (): with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file: __a : List[Any] = str(file.readlines()[0] ) __a : str = names.replace('"' , '' ).split(',' ) names.sort() __a : Union[str, Any] = 0 __a : Tuple = 0 for i, name in enumerate(names ): for letter in name: name_score += ord(letter ) - 64 total_score += (i + 1) * name_score __a : Any = 0 return total_score if __name__ == "__main__": print(solution())
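Hand check of the scoring rule above, using the example from the original problem statement: 'COLIN' is worth 3 + 15 + 12 + 9 + 14 = 53, so in 938th place it would contribute 938 × 53 = 49714.

assert sum(ord(c) - 64 for c in 'COLIN') == 53
assert 938 * 53 == 49_714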
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowercase : str = logging.get_logger(__name__) __lowercase : List[Any] = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ): A_ = "nat" A_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , __a=4 , __a=3 , __a=64 , __a=[3, 4, 6, 5] , __a=[2, 4, 8, 16] , __a=7 , __a=3.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=0.02 , __a=1E-5 , __a=0.0 , __a=None , __a=None , **__a , ): '''simple docstring''' super().__init__(**__a ) __a : Optional[Any] = patch_size __a : Union[str, Any] = num_channels __a : List[str] = embed_dim __a : Dict = depths __a : List[Any] = len(__a ) __a : Optional[Any] = num_heads __a : Union[str, Any] = kernel_size __a : List[str] = mlp_ratio __a : List[Any] = qkv_bias __a : List[str] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Any = drop_path_rate __a : Any = hidden_act __a : Dict = layer_norm_eps __a : int = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __a : str = int(embed_dim * 2 ** (len(__a ) - 1) ) __a : Optional[int] = layer_scale_init_value __a : Dict = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )] __a , __a : Optional[Any] = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names )
'''simple docstring''' __lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __lowercase : List[str] = ['a', 'b', 'c', 'd', 'e'] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ): __a : Any = start # add current to visited visited.append(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # if all neighbors visited add current to sort sort.append(_SCREAMING_SNAKE_CASE ) # if all vertices haven't been visited select a new one to visit if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): for vertice in vertices: if vertice not in visited: __a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # return sort return sort if __name__ == "__main__": __lowercase : Union[str, Any] = topological_sort('a', [], []) print(sort)
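Trace on the sample graph above: starting from 'a', the DFS appends each vertex only after all of its children have been emitted, so the printed order is ['c', 'd', 'e', 'b', 'a'], children first; reverse it if a parents-before-children ordering is wanted.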
294
1
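The topological-sort snippet above is a post-order depth-first search, but the obfuscation renamed its result binding to `__lowercase` while the final `print(sort)` still expects `sort`. A self-contained sketch of the same traversal, with hypothetical names:

def topo_sort(graph: dict[str, list[str]]) -> list[str]:
    # Post-order DFS: a vertex is appended only after all of its
    # descendants, so dependencies come before dependents.
    visited: set[str] = set()
    order: list[str] = []

    def visit(vertex: str) -> None:
        visited.add(vertex)
        for neighbor in graph[vertex]:
            if neighbor not in visited:
                visit(neighbor)
        order.append(vertex)

    for vertex in graph:
        if vertex not in visited:
            visit(vertex)
    return order

print(topo_sort({'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}))  # ['c', 'd', 'e', 'b', 'a']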
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = ProphetNetTokenizer A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() __a : Optional[Any] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Optional[int] = 'UNwant\u00E9d,running' __a : Optional[Any] = 'unwanted, running' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.tokenizer_class(self.vocab_file ) __a : Optional[int] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __a : Any = {} for i, token in enumerate(__a ): __a : int = i __a : Tuple = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __a : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __a : List[str] = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102] __a : List[str] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __a : Optional[int] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __a : int = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __a : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __a : List[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __a : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
294
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
294
1
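The parity helper closing the row above relies on `number & 1`, which agrees with `number % 2` for negative integers as well, since Python applies `&` to the two's-complement value; a quick self-check:

# Bitwise parity agrees with modulo parity across sign in Python.
assert all((n & 1 == 0) == (n % 2 == 0) for n in range(-10, 11))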
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( lowerCAmelCase_ ): A_ = (DEISMultistepScheduler,) A_ = (("num_inference_steps", 25),) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' __a : Union[str, Any] = { 'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, } config.update(**__a ) return config def __UpperCAmelCase ( self , __a=0 , **__a ): '''simple docstring''' __a : Optional[int] = dict(self.forward_default_kwargs ) __a : Dict = kwargs.pop('num_inference_steps' , __a ) __a : List[Any] = self.dummy_sample __a : Tuple = 0.1 * sample __a : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __a : int = self.get_scheduler_config(**__a ) __a : Optional[Any] = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals __a : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __a : Dict = scheduler_class.from_pretrained(__a ) new_scheduler.set_timesteps(__a ) # copy over dummy past residuals __a : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] __a , __a : Optional[Any] = sample, sample for t in range(__a , time_step + scheduler.config.solver_order + 1 ): __a : List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : Dict = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self , __a=0 , **__a ): '''simple docstring''' __a : Dict = dict(self.forward_default_kwargs ) __a : Tuple = kwargs.pop('num_inference_steps' , __a ) __a : Optional[Any] = self.dummy_sample __a : Any = 0.1 * sample __a : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __a : Any = self.get_scheduler_config() __a : int = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals (must be after setting timesteps) __a : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __a : Any = scheduler_class.from_pretrained(__a ) # copy over dummy past residuals new_scheduler.set_timesteps(__a ) # copy over dummy past residual (must be after setting timesteps) __a : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] __a : List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : Tuple = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self , __a=None , **__a ): '''simple docstring''' if scheduler is None: __a : str = self.scheduler_classes[0] __a : Optional[Any] = self.get_scheduler_config(**__a ) __a : str = scheduler_class(**__a ) __a : Optional[int] = self.scheduler_classes[0] __a : Any = self.get_scheduler_config(**__a ) __a : Optional[int] = scheduler_class(**__a ) __a : List[Any] = 10 __a : Any = self.dummy_model() __a : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(__a ) for i, t in 
enumerate(scheduler.timesteps ): __a : Optional[Any] = model(__a , __a ) __a : Union[str, Any] = scheduler.step(__a , __a , __a ).prev_sample return sample def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = dict(self.forward_default_kwargs ) __a : Optional[int] = kwargs.pop('num_inference_steps' , __a ) for scheduler_class in self.scheduler_classes: __a : Any = self.get_scheduler_config() __a : Optional[int] = scheduler_class(**__a ) __a : List[Any] = self.dummy_sample __a : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__a , 'set_timesteps' ): scheduler.set_timesteps(__a ) elif num_inference_steps is not None and not hasattr(__a , 'set_timesteps' ): __a : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] __a : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] __a : List[str] = scheduler.timesteps[5] __a : Optional[Any] = scheduler.timesteps[6] __a : Optional[Any] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : str = scheduler.step(__a , __a , __a , **__a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = DEISMultistepScheduler(**self.get_scheduler_config() ) __a : Optional[int] = self.full_loop(scheduler=__a ) __a : Any = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.23916 ) < 1E-3 __a : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __a : Dict = DPMSolverMultistepScheduler.from_config(scheduler.config ) __a : int = UniPCMultistepScheduler.from_config(scheduler.config ) __a : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) __a : str = self.full_loop(scheduler=__a ) __a : Any = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.23916 ) < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' self.check_over_configs(thresholding=__a ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type='deis' , solver_order=__a , solver_type=__a , ) def __UpperCAmelCase ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) __a : int = self.full_loop( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) assert not torch.isnan(__a ).any(), "Samples have nan numbers" def __UpperCAmelCase ( self ): '''simple docstring''' self.check_over_configs(lower_order_final=__a ) self.check_over_configs(lower_order_final=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__a , time_step=0 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.full_loop() __a : Optional[Any] = 
torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.23916 ) < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.full_loop(prediction_type='v_prediction' ) __a : Any = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : str = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 ) __a : Dict = scheduler_class(**__a ) __a : Any = 10 __a : Tuple = self.dummy_model() __a : Tuple = self.dummy_sample_deter.half() scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __a : List[Any] = model(__a , __a ) __a : List[Any] = scheduler.step(__a , __a , __a ).prev_sample assert sample.dtype == torch.floataa
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
1
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple ): global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: __a : List[str] = mf_knapsack(i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a : Tuple = max( mf_knapsack(i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , mf_knapsack(i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , j - wt[i - 1] ) + val[i - 1] , ) __a : Any = val return f[i][j] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str ): __a : int = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: __a : str = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: __a : Union[str, Any] = dp[i - 1][w_] return dp[n][w_], dp def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list ): if not (isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) __a : Any = len(_SCREAMING_SNAKE_CASE ) if num_items != len(_SCREAMING_SNAKE_CASE ): __a : int = ( 'The number of weights must be the same as the number of values.\n' F"""But got {num_items} weights and {len(_SCREAMING_SNAKE_CASE )} values""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE ): if not isinstance(wt[i] , _SCREAMING_SNAKE_CASE ): __a : Any = ( 'All weights must be integers but got weight of ' F"""type {type(wt[i] )} at index {i}""" ) raise TypeError(_SCREAMING_SNAKE_CASE ) __a , __a : List[str] = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : set = set() _construct_solution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return optimal_val, example_optional_set def lowerCamelCase (_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: optimal_set.add(_SCREAMING_SNAKE_CASE ) _construct_solution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i - 1 , j - wt[i - 1] , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase : Tuple = [3, 2, 4, 4] __lowercase : Any = [4, 3, 2, 3] __lowercase : Optional[Any] = 4 __lowercase : List[Any] = 6 __lowercase : List[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __lowercase , __lowercase : List[str] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __lowercase , __lowercase : Union[str, Any] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print('optimal_value = ', optimal_solution) print('An optimal subset corresponding to the optimal value', optimal_subset)
294
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
294
1
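The first snippet in the row above is the classic 0/1 knapsack with solution reconstruction, but all four functions were renamed to the same `lowerCamelCase` and share duplicated `_SCREAMING_SNAKE_CASE` parameters, so the module would not even parse as shown. A minimal runnable sketch of the core bottom-up table, with hypothetical names and the same example instance as the row's `__main__` block:

def knapsack_max_value(capacity: int, weights: list[int], values: list[int]) -> int:
    # dp[i][w] = best value using the first i items within weight budget w.
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            dp[i][w] = dp[i - 1][w]  # skip item i
            if weights[i - 1] <= w:  # or take it, if it fits
                dp[i][w] = max(dp[i][w], values[i - 1] + dp[i - 1][w - weights[i - 1]])
    return dp[n][capacity]

# Items 3 and 4 (weights 2 + 3, values 4 + 4) give the optimum of 8, as the row asserts.
assert knapsack_max_value(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8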
'''simple docstring''' __lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __lowercase : List[str] = ['a', 'b', 'c', 'd', 'e'] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ): __a : Any = start # add current to visited visited.append(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # if all neighbors visited add current to sort sort.append(_SCREAMING_SNAKE_CASE ) # if all vertices haven't been visited select a new one to visit if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): for vertice in vertices: if vertice not in visited: __a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # return sort return sort if __name__ == "__main__": __lowercase : Union[str, Any] = topological_sort('a', [], []) print(sort)
294
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : int = parent __a : Union[str, Any] = batch_size __a : Optional[int] = seq_length __a : List[str] = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : List[str] = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : str = num_attention_heads __a : Optional[int] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Dict = type_vocab_size __a : Any = type_sequence_label_size __a : Dict = initializer_range __a : Optional[Any] = num_labels __a : Optional[Any] = num_choices __a : Union[str, Any] = relative_attention __a : List[str] = position_biased_input __a : List[Any] = pos_att_type __a : Tuple = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : Any = None if self.use_token_type_ids: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : int = None __a : Dict = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def 
__UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0] __a : str = model(__a , token_type_ids=__a )[0] __a : Optional[int] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : int = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[Any] = self.num_labels __a : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[str] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __a : str = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : int = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = config_and_inputs __a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, 
"token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = DebertaVaModelTester(self ) __a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : str = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @unittest.skip(reason='Model not available yet' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __a : int = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __a : str = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
294
1
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ): # Check if the input is valid if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3: raise ValueError('Please enter a valid equation.' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('Both a & b of two equations can\'t be zero.' ) # Extract the coefficients __a , __a , __a : Optional[int] = equationa __a , __a , __a : List[Any] = equationa # Calculate the determinants of the matrices __a : Dict = aa * ba - aa * ba __a : Union[str, Any] = ca * ba - ca * ba __a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('Infinite solutions. (Consistent system)' ) else: raise ValueError('No solution. (Inconsistent system)' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: __a : Any = determinant_x / determinant __a : Union[str, Any] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
294
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ): return False return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ): __a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[Any] = model __a : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Union[str, Any] = model.module if not keep_fpaa_wrapper: __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' ) __a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ): __a : Any = forward.__wrapped__ if forward == original_forward: break __a : str = forward if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ): convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[str] = model __a : Optional[int] = compiled_model return model def lowerCamelCase (): PartialState().wait_for_everyone() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): if PartialState().distributed_type == DistributedType.TPU: xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @contextmanager def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ): for key, value in kwargs.items(): __a : Optional[int] = str(_SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ): return obj.__qualname__ if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): return obj.__name__ return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): for key, value in source.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} ) merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a : Tuple = value return destination def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ): if port is None: __a : List[str] = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
294
1
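The Cramer's-rule snippet above computes the three 2x2 determinants directly; note that the obfuscation left both equation parameters named `equationa`, so the zero-coefficient guard no longer distinguishes the two equations. A small worked instance of the same determinant arithmetic:

# Solve x + 2y = 3 and 2x + y = 3 with the determinants used in the row above.
a1, b1, c1 = 1, 2, 3
a2, b2, c2 = 2, 1, 3
det = a1 * b2 - a2 * b1    # coefficient determinant, here -3
det_x = c1 * b2 - c2 * b1  # -3
det_y = a1 * c2 - a2 * c1  # -3
assert (det_x / det, det_y / det) == (1.0, 1.0)  # x = y = 1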
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : Optional[int] = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys __lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
'''simple docstring''' from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
294
1
'''simple docstring''' import math def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_SCREAMING_SNAKE_CASE ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. __lowercase : Union[str, Any] = 'Enter the base and the power separated by a comma: ' __lowercase , __lowercase : str = map(int, input(prompt).split(',')) __lowercase , __lowercase : List[str] = map(int, input(prompt).split(',')) # We find the log of each number, using the function res(), which takes two # arguments. __lowercase : Optional[int] = res(xa, ya) __lowercase : Dict = res(xa, ya) # We check for the largest number if resa > resa: print('Largest number is', xa, '^', ya) elif resa > resa: print('Largest number is', xa, '^', ya) else: print('Both are equal')
294
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class __UpperCamelCase : A_ = 42 A_ = None A_ = None def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ): # Validation def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool: if node is None: return True if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(_SCREAMING_SNAKE_CASE ): raise ValueError( 'Each node should be type of TreeNode and data should be float.' ) def is_binary_search_tree_recursive_check( _SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , _SCREAMING_SNAKE_CASE ) ) return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) ) if __name__ == "__main__": import doctest doctest.testmod()
294
1
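The first snippet in the row above compares x^y against a^b through y*log10(x) instead of evaluating the powers, which is what its surviving `res()` helper does; a quick verification of that identity on a concrete pair:

import math

# 100 * log10(2) ~= 30.103 exceeds 30 * log10(10) = 30, so 2**100 > 10**30
# without ever evaluating either power.
assert 100 * math.log10(2) > 30 * math.log10(10)
assert 2**100 > 10**30  # cross-check by exact integer arithmetic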
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): return int((input_a, input_a).count(0 ) != 0 ) def lowerCamelCase (): assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
294
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ): from transformers.testing_utils import pytest_terminal_summary_main __a : Any = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
294
1
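The NAND helper above encodes the gate as `(a, b).count(0) != 0`; on 0/1 inputs this is the same as negating AND, which is easy to confirm exhaustively:

# On 0/1 inputs, 'some input is 0' is exactly NOT (a AND b).
for a in (0, 1):
    for b in (0, 1):
        assert int((a, b).count(0) != 0) == 1 - (a & b)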
'''simple docstring''' import itertools import string from collections.abc import Generator, Iterable def lowerCamelCase (_SCREAMING_SNAKE_CASE : Iterable[str] , _SCREAMING_SNAKE_CASE : int ): __a : Optional[int] = iter(_SCREAMING_SNAKE_CASE ) while True: __a : List[str] = tuple(itertools.islice(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) if not chunk: return yield chunk def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : int = ''.join([c.upper() for c in dirty if c in string.ascii_letters] ) __a : Optional[Any] = '' if len(_SCREAMING_SNAKE_CASE ) < 2: return dirty for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_SCREAMING_SNAKE_CASE ) & 1: clean += "X" return clean def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) __a : int = 'ABCDEFGHIKLMNOPQRSTUVWXYZ' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler __a : int = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_SCREAMING_SNAKE_CASE ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_SCREAMING_SNAKE_CASE ) return table def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : Optional[Any] = generate_table(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = prepare_input(_SCREAMING_SNAKE_CASE ) __a : int = '' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_SCREAMING_SNAKE_CASE , 2 ): __a , __a : int = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 ) __a , __a : Optional[int] = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : str = generate_table(_SCREAMING_SNAKE_CASE ) __a : Optional[Any] = '' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_SCREAMING_SNAKE_CASE , 2 ): __a , __a : int = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 ) __a , __a : Tuple = divmod(table.index(_SCREAMING_SNAKE_CASE ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
294
'''simple docstring''' import re from filelock import FileLock try: import nltk __lowercase : Optional[Any] = True except (ImportError, ModuleNotFoundError): __lowercase : Dict = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
294
1
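In the Playfair snippet above, all four helpers were renamed to the same `lowerCamelCase`, so each definition shadows the previous one. The input-preparation step is the easiest piece to exercise in isolation; a self-contained sketch of that logic, with a hypothetical name:

import string

def prepare_playfair_input(dirty: str) -> str:
    # Keep ASCII letters, uppercase them, split doubled letters with 'X',
    # then pad to an even length, mirroring the row's prepare_input.
    dirty = ''.join(c.upper() for c in dirty if c in string.ascii_letters)
    if len(dirty) < 2:
        return dirty
    clean = ''
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += 'X'
    clean += dirty[-1]
    if len(clean) % 2:
        clean += 'X'
    return clean

assert prepare_playfair_input('Hello, World') == 'HELXLOWORLDX'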
'''simple docstring''' from __future__ import annotations def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] ): # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
294
'''simple docstring''' import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __lowercase : Any = [0, 25, 50] __lowercase : int = [25, 50, 75] __lowercase : List[str] = fuzz.membership.trimf(X, abca) __lowercase : Any = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __lowercase : List[Any] = np.ones(75) __lowercase : Any = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __lowercase : str = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __lowercase : Optional[Any] = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __lowercase : str = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
294
1
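The first snippet in the row above folds a minimum path sum in place over the matrix; a runnable sketch of the same dynamic programme, with a hypothetical name and the standard 3x3 example:

def min_path_sum(matrix: list[list[int]]) -> int:
    # In-place DP: each cell ends up holding the cheapest cost of reaching
    # it while moving only right or down, exactly as in the row above.
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1 -> 3 -> 1 -> 1 -> 1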
'''simple docstring''' import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __UpperCamelCase : @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.get_dummy_input() @property def __UpperCAmelCase ( self ): '''simple docstring''' if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" ) def __UpperCAmelCase ( self , __a=True , __a=False , __a=False , __a=False , ): '''simple docstring''' __a : List[str] = 4 __a : Tuple = 32 __a : Optional[int] = (32, 32) __a : str = torch.manual_seed(0 ) __a : Union[str, Any] = torch.device(__a ) __a : int = (batch_size, num_channels) + sizes __a : str = randn_tensor(__a , generator=__a , device=__a ) __a : int = {'hidden_states': hidden_states} if include_temb: __a : Optional[Any] = 128 __a : Dict = randn_tensor((batch_size, temb_channels) , generator=__a , device=__a ) if include_res_hidden_states_tuple: __a : Dict = torch.manual_seed(1 ) __a : str = (randn_tensor(__a , generator=__a , device=__a ),) if include_encoder_hidden_states: __a : List[Any] = floats_tensor((batch_size, 32, 32) ).to(__a ) if include_skip_sample: __a : Dict = randn_tensor(((batch_size, 3) + sizes) , generator=__a , device=__a ) return dummy_input def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": __a : Union[str, Any] = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) __a : List[str] = self.dummy_input return init_dict, inputs_dict def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a , __a : Union[str, Any] = self.prepare_init_args_and_inputs_for_common() __a : int = self.block_class(**__a ) unet_block.to(__a ) unet_block.eval() with torch.no_grad(): __a : Tuple = unet_block(**__a ) if isinstance(__a , __a ): __a : Union[str, Any] = output[0] self.assertEqual(output.shape , self.output_shape ) __a : str = output[0, -1, -3:, -3:] __a : Optional[Any] = torch.tensor(__a ).to(__a ) assert torch_all_close(output_slice.flatten() , __a , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Optional[int] = self.prepare_init_args_and_inputs_for_common() __a : Optional[Any] = self.block_class(**__a ) model.to(__a ) model.train() __a : Any = model(**__a ) if isinstance(__a , __a ): __a : Optional[int] = output[0] __a : Any = torch.device(__a ) __a : Union[str, Any] = randn_tensor(output.shape , device=__a ) __a : Union[str, Any] = torch.nn.functional.mse_loss(__a , __a ) loss.backward()
294
'''simple docstring''' import sys __lowercase : Union[str, Any] = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[str] = 1 for digit in s: product *= int(_SCREAMING_SNAKE_CASE ) return product def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ): __a : Optional[int] = -sys.maxsize - 1 __a : Optional[Any] = n[:13] __a : int = 13 while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13: if int(n[cur_index] ) >= int(substr[0] ): __a : List[Any] = substr[1:] + n[cur_index] cur_index += 1 else: __a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) ) __a : Optional[Any] = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
294
1
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=24 , __a=2 , __a=6 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=None , __a=1000 , ): '''simple docstring''' __a : str = parent __a : Optional[int] = batch_size __a : List[Any] = seq_length __a : Dict = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Optional[int] = use_labels __a : List[str] = vocab_size __a : Dict = hidden_size __a : Union[str, Any] = num_hidden_layers __a : List[str] = num_attention_heads __a : Any = intermediate_size __a : Any = hidden_act __a : Optional[Any] = hidden_dropout_prob __a : int = attention_probs_dropout_prob __a : Dict = max_position_embeddings __a : str = type_vocab_size __a : Dict = type_sequence_label_size __a : str = initializer_range __a : List[Any] = num_labels __a : str = scope __a : Tuple = range_bbox def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a : Optional[int] = bbox[i, j, 3] __a : Optional[int] = bbox[i, j, 1] __a : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: __a : Optional[int] = bbox[i, j, 2] __a : Optional[Any] = bbox[i, j, 0] __a : int = t __a : Any = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : List[Any] = None if self.use_token_type_ids: __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Tuple = None __a : int = None if self.use_labels: __a : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : str = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCAmelCase ( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : Optional[int] = LiltModel(config=__a ) model.to(__a ) model.eval() __a : Any = model(__a , bbox=__a , attention_mask=__a , token_type_ids=__a ) __a 
: List[str] = model(__a , bbox=__a , token_type_ids=__a ) __a : Any = model(__a , bbox=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : Optional[int] = self.num_labels __a : str = LiltForTokenClassification(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model( __a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , ): '''simple docstring''' __a : str = LiltForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __a : int = model( __a , bbox=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Tuple = config_and_inputs __a : int = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' return True def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = LiltModelTester(self ) __a : Union[str, Any] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Dict = type self.model_tester.create_and_check_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[int] = LiltModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @slow class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple 
docstring''' __a : Union[str, Any] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(__a ) __a : List[Any] = torch.tensor([[1, 2]] , device=__a ) __a : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__a ) # forward pass with torch.no_grad(): __a : Union[str, Any] = model(input_ids=__a , bbox=__a ) __a : Union[str, Any] = torch.Size([1, 2, 768] ) __a : Dict = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__a , ) self.assertEqual(outputs.last_hidden_state.shape , __a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __a , atol=1E-3 ) )
294
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = CodeGenTokenizer A_ = CodeGenTokenizerFast A_ = True A_ = {"add_prefix_space": True} A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __a : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) ) __a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : Dict = {'unk_token': '<unk>'} __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__a ) ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : Tuple = 'lower newer' __a : Tuple = 'lower newer' return input_text, output_text def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __a : str = 'lower newer' __a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) self.assertListEqual(__a , __a ) __a : List[str] = tokens + [tokenizer.unk_token] __a : Any = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return __a : List[Any] = self.get_tokenizer() __a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Any = 'lower newer' # Testing tokenization __a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a ) __a : Dict = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids without special tokens __a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) __a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) # Testing conversion to ids with special tokens __a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a ) __a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a ) __a : int = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) # Testing the unknown token __a : Any = tokens + [rust_tokenizer.unk_token] __a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' pass def __UpperCAmelCase ( self 
, __a=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a ) # Simple input __a : List[Any] = 'This is a simple input' __a : Tuple = ['This is a simple input 1', 'This is a simple input 2'] __a : Tuple = ('This is a simple input', 'This is a pair') __a : str = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Simple input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) # Pair input self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' ) # Pair input self.assertRaises( __a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __a : str = 'This is a simple input' __a : Any = ['This is a simple input looooooooong', 'This is a simple input'] __a : Optional[int] = ('This is a simple input', 'This is a pair') __a : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __a : int = tokenizer.pad_token_id __a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' ) __a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) __a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' ) __a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = '$$$' __a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a ) __a : Union[str, Any] = 'This is a simple input' __a : List[Any] = ['This is a 
simple input 1', 'This is a simple input 2'] __a : List[Any] = tokenizer.bos_token_id __a : List[str] = tokenizer(__a ) __a : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __a : Any = tokenizer.decode(out_s.input_ids ) __a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) __a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' __a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b' __a : Optional[int] = tokenizer.encode(__a ) __a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] __a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a ) self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass
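A minimal usage sketch of the truncate_before_pattern behavior exercised in the slow test above (the input snippet is illustrative):

from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
ids = tok.encode('x = 1\n# trailing comment\ny = 2')
# Decoding truncates at the earliest match of any (MULTILINE) regex in the list:
print(tok.decode(ids, truncate_before_pattern=['^#']))  # -> 'x = 1\n'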
294
1
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( lowerCAmelCase_ ): A_ = (IPNDMScheduler,) A_ = (("num_inference_steps", 50),) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' __a : List[Any] = {'num_train_timesteps': 1000} config.update(**__a ) return config def __UpperCAmelCase ( self , __a=0 , **__a ): '''simple docstring''' __a : Any = dict(self.forward_default_kwargs ) __a : Optional[Any] = kwargs.pop('num_inference_steps' , __a ) __a : str = self.dummy_sample __a : List[Any] = 0.1 * sample __a : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a : List[str] = self.get_scheduler_config(**__a ) __a : str = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals __a : Dict = dummy_past_residuals[:] if time_step is None: __a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __a : str = scheduler_class.from_pretrained(__a ) new_scheduler.set_timesteps(__a ) # copy over dummy past residuals __a : Any = dummy_past_residuals[:] __a : str = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : int = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a : List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : Tuple = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self , __a=0 , **__a ): '''simple docstring''' __a : Any = dict(self.forward_default_kwargs ) __a : Union[str, Any] = kwargs.pop('num_inference_steps' , __a ) __a : Any = self.dummy_sample __a : List[Any] = 0.1 * sample __a : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __a : Union[str, Any] = self.get_scheduler_config() __a : int = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals (must be after setting timesteps) __a : Any = dummy_past_residuals[:] if time_step is None: __a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __a : int = scheduler_class.from_pretrained(__a ) # copy over dummy past residuals new_scheduler.set_timesteps(__a ) # copy over dummy past residual (must be after setting timesteps) __a : str = dummy_past_residuals[:] __a : Optional[Any] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : int = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __a : Dict = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : Optional[int] = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __UpperCAmelCase ( self , **__a ): '''simple docstring''' __a : int = self.scheduler_classes[0] __a : List[Any] = self.get_scheduler_config(**__a ) __a : List[str] = scheduler_class(**__a ) __a : int = 10 __a : Any = self.dummy_model() __a : int = self.dummy_sample_deter 
scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __a : Optional[int] = model(__a , __a ) __a : Optional[Any] = scheduler.step(__a , __a , __a ).prev_sample for i, t in enumerate(scheduler.timesteps ): __a : Dict = model(__a , __a ) __a : List[Any] = scheduler.step(__a , __a , __a ).prev_sample return sample def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = dict(self.forward_default_kwargs ) __a : Optional[Any] = kwargs.pop('num_inference_steps' , __a ) for scheduler_class in self.scheduler_classes: __a : Union[str, Any] = self.get_scheduler_config() __a : Tuple = scheduler_class(**__a ) __a : List[Any] = self.dummy_sample __a : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__a , 'set_timesteps' ): scheduler.set_timesteps(__a ) elif num_inference_steps is not None and not hasattr(__a , 'set_timesteps' ): __a : Optional[int] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __a : Any = dummy_past_residuals[:] __a : Optional[Any] = scheduler.timesteps[5] __a : Union[str, Any] = scheduler.timesteps[6] __a : Optional[int] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : Optional[int] = scheduler.step(__a , __a , __a , **__a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __a : List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample __a : Tuple = scheduler.step(__a , __a , __a , **__a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __UpperCAmelCase ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__a , time_step=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__a , time_step=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.full_loop() __a : List[str] = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 254_0529 ) < 10
294
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): if not isinstance(_SCREAMING_SNAKE_CASE , int ): raise ValueError('iterations must be defined as integers' ) if not isinstance(_SCREAMING_SNAKE_CASE , int ) or not number >= 1: raise ValueError( 'starting number must be\nan integer and be more than 0' ) if not iterations >= 1: raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' ) __a : Dict = '' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(_SCREAMING_SNAKE_CASE ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
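For illustration, a worked call of the function above; each round appends its token plus a trailing space:

# lowerCamelCase(1, 7)
# -> '1 2 Fizz 4 Buzz Fizz 7 '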
294
1
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
294
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {'height': 18, 'width': 18} __a : int = parent __a : Dict = batch_size __a : Optional[int] = num_channels __a : List[Any] = image_size __a : Tuple = min_resolution __a : str = max_resolution __a : str = do_resize __a : Optional[Any] = size __a : str = apply_ocr def __UpperCAmelCase ( self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = LayoutLMvaImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , 'do_resize' ) ) self.assertTrue(hasattr(__a , 'size' ) ) self.assertTrue(hasattr(__a , 'apply_ocr' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) __a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , __a ) self.assertIsInstance(encoding.boxes , __a ) # Test batched __a : Any = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __a : Optional[Any] = 
image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched __a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset __a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) __a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' ) __a : Optional[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 
'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 __a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 
726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a ) self.assertListEqual(encoding.boxes , __a ) # with apply_OCR = False __a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a ) __a : List[Any] = image_processing(__a , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
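A minimal usage sketch of the OCR path tested above; the image path is an assumption, and Tesseract must be installed for apply_ocr=True:

from PIL import Image

processor = LayoutLMvaImageProcessor()              # apply_ocr defaults to True
image = Image.open('document.png').convert('RGB')   # illustrative input
enc = processor(image, return_tensors='pt')
pixel_values = enc.pixel_values                     # shape (1, 3, 224, 224)
words, boxes = enc.words, enc.boxes                 # per-image OCR words and boxes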
294
1
'''simple docstring''' from ...processing_utils import ProcessorMixin class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "WhisperFeatureExtractor" A_ = "WhisperTokenizer" def __init__( self , __a , __a ): '''simple docstring''' super().__init__(__a , __a ) __a : List[Any] = self.feature_extractor __a : str = False def __UpperCAmelCase ( self , __a=None , __a=None , __a=True ): '''simple docstring''' return self.tokenizer.get_decoder_prompt_ids(task=__a , language=__a , no_timestamps=__a ) def __call__( self , *__a , **__a ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__a , **__a ) __a : str = kwargs.pop('audio' , __a ) __a : Union[str, Any] = kwargs.pop('sampling_rate' , __a ) __a : List[str] = kwargs.pop('text' , __a ) if len(__a ) > 0: __a : List[str] = args[0] __a : str = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: __a : Tuple = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a ) if text is not None: __a : Optional[Any] = self.tokenizer(__a , **__a ) if text is None: return inputs elif audio is None: return encodings else: __a : Any = encodings['input_ids'] return inputs def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' return self.tokenizer.batch_decode(*__a , **__a ) def __UpperCAmelCase ( self , *__a , **__a ): '''simple docstring''' return self.tokenizer.decode(*__a , **__a ) def __UpperCAmelCase ( self , __a , __a="np" ): '''simple docstring''' return self.tokenizer.get_prompt_ids(__a , return_tensors=__a )
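A minimal usage sketch of the audio/text routing implemented above (model id and waveform are illustrative assumptions):

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
waveform = np.zeros(16000, dtype=np.float32)   # one second of silence, for illustration
inputs = processor(audio=waveform, sampling_rate=16000, return_tensors='pt')  # feature-extractor path
labels = processor(text='hello world').input_ids                              # tokenizer path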
294
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __lowercase : List[Any] = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "ernie_m" A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , **__a ) __a : int = vocab_size __a : Dict = hidden_size __a : str = num_hidden_layers __a : Dict = num_attention_heads __a : List[str] = intermediate_size __a : Union[str, Any] = hidden_act __a : List[Any] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Any = max_position_embeddings __a : int = initializer_range __a : Dict = layer_norm_eps __a : int = classifier_dropout __a : Dict = is_decoder __a : int = act_dropout
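A short sketch of the attribute_map aliasing declared above: reads on the legacy names are redirected to the canonical attributes (ErnieMConfig is assumed to be the exported class name):

cfg = ErnieMConfig(num_labels=3, classifier_dropout=0.1)
cfg.num_classes   # -> 3, resolved through attribute_map to num_labels
cfg.dropout       # -> 0.1, resolved to classifier_dropout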
294
1
'''simple docstring''' import unittest import numpy as np def lowerCamelCase (_SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray | None = None , ): __a : List[Any] = np.shape(_SCREAMING_SNAKE_CASE ) __a : Any = np.shape(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = np.shape(_SCREAMING_SNAKE_CASE ) if shape_a[0] != shape_b[0]: __a : str = ( 'Expected the same number of rows for A and B. ' F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) if shape_b[1] != shape_c[1]: __a : int = ( 'Expected the same number of columns for B and C. ' F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) __a : int = pseudo_inv if a_inv is None: try: __a : List[Any] = np.linalg.inv(_SCREAMING_SNAKE_CASE ) except np.linalg.LinAlgError: raise ValueError( 'Input matrix A is not invertible. Cannot compute Schur complement.' ) return mat_c - mat_b.T @ a_inv @ mat_b class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __a : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] ) __a : Any = np.array([[2, 1], [6, 3]] ) __a : List[Any] = schur_complement(__a , __a , __a ) __a : Dict = np.block([[a, b], [b.T, c]] ) __a : List[str] = np.linalg.det(__a ) __a : List[str] = np.linalg.det(__a ) __a : List[Any] = np.linalg.det(__a ) self.assertAlmostEqual(__a , det_a * det_s ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __a : int = np.array([[0, 3], [3, 0], [2, 3]] ) __a : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(__a ): schur_complement(__a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) __a : int = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__a ): schur_complement(__a , __a , __a ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
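The determinant check in the first test above is the standard block-matrix identity for the Schur complement; in the notation of the function,

\[ S = C - B^{\top} A^{-1} B, \qquad \det\begin{pmatrix} A & B \\ B^{\top} & C \end{pmatrix} = \det(A)\,\det(S), \]

which holds whenever A is invertible, exactly the condition the np.linalg.inv call enforces.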
294
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a ): '''simple docstring''' super().__init__() __a : int = module __a : List[Any] = nn.Sequential( nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , ) __a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__a ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' return self.module(__a , *__a , **__a ) + self.adapter(__a ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module A_ = "bigscience/bloom-1b7" # Constant values A_ = 2.109659552692574 A_ = "Hello my name is" A_ = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) A_ = 10 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = AutoTokenizer.from_pretrained(self.model_name ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # Models and tokenizer __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_abit.config self.assertTrue(hasattr(__a , 'quantization_config' ) ) __a : Union[str, Any] = config.to_dict() __a : Tuple = config.to_diff_dict() __a : Tuple = config.to_json_string() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit __a : List[Any] = self.model_fpaa.get_memory_footprint() __a : List[Any] = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __a : Tuple = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__a , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = BitsAndBytesConfig() __a : Tuple = True __a : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ) __a : List[Any] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BitsAndBytesConfig() with self.assertRaises(__a ): __a : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(__a ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__a ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with 
self.assertRaises(__a ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__a ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) __a : Optional[int] = self.model_fpaa.to(torch.floataa ) __a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __a : List[Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.half() # Check this does not throw an error __a : Union[str, Any] = self.model_fpaa.float() def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def __UpperCAmelCase ( cls ): '''simple docstring''' __a : Any = 't5-small' __a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __a : int = AutoTokenizer.from_pretrained(cls.model_name ) __a : Union[str, Any] = 'Translate in German: Hello, my dog is cute' def __UpperCAmelCase ( self ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from transformers import TaForConditionalGeneration __a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules __a : List[str] = None # test with `t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : Any = model.generate(**__a ) # test with `flan-t5-small` __a : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[Any] = model.generate(**__a ) __a : Optional[int] = modules def __UpperCAmelCase ( self ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : List[str] = model.generate(**__a ) # test with `flan-t5-small` __a : List[Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=__a , device_map='auto' ) __a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __a : int = model.generate(**__a ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # model_name __a : List[Any] = 'bigscience/bloom-560m' __a : Union[str, Any] = 't5-small' # Different types of model __a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Sequence classification model __a : Dict = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=__a , device_map='auto' ) # CausalLM model 
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' ) # Seq2seq model __a : Any = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=__a , device_map='auto' ) def __UpperCAmelCase ( self ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __a : str = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=__a , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 'facebook/opt-350m' super().setUp() def __UpperCAmelCase ( self ): '''simple docstring''' if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __a : Tuple = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __a : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__a ) ): __a : str = LoRALayer(module.q_proj , rank=16 ) __a : str = LoRALayer(module.k_proj , rank=16 ) __a : Optional[int] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __a : int = model.forward(**__a ) out.logits.norm().backward() for module in model.modules(): if isinstance(__a , __a ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__a , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "gpt2-xl" A_ = 3.3191854854152187
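A minimal sketch of wrapping a linear module with the LoRALayer adapter defined at the top of this sample; the forward pass returns the frozen base output plus the low-rank bypass:

import torch
import torch.nn as nn

base = nn.Linear(64, 64, bias=False)
wrapped = LoRALayer(base, rank=8)   # trainable rank-8 adapter around `base`
y = wrapped(torch.randn(2, 64))     # = base(x) + adapter(x)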
294
1
'''simple docstring''' import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = ['a', 'b', 'c'] # Defaults to last layer if both are None __a , __a : int = get_aligned_output_features_output_indices(__a , __a , __a ) self.assertEqual(__a , ['c'] ) self.assertEqual(__a , [2] ) # Out indices set to match out features __a , __a : Any = get_aligned_output_features_output_indices(['a', 'c'] , __a , __a ) self.assertEqual(__a , ['a', 'c'] ) self.assertEqual(__a , [0, 2] ) # Out features set to match out indices __a , __a : str = get_aligned_output_features_output_indices(__a , [0, 2] , __a ) self.assertEqual(__a , ['a', 'c'] ) self.assertEqual(__a , [0, 2] ) # Out features selected from negative indices __a , __a : Any = get_aligned_output_features_output_indices(__a , [-3, -1] , __a ) self.assertEqual(__a , ['a', 'c'] ) self.assertEqual(__a , [-3, -1] ) def __UpperCAmelCase ( self ): '''simple docstring''' with self.assertRaises(__a ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , __a ) # Out features must be a list with self.assertRaises(__a ): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] ) # Out features must be a subset of stage names with self.assertRaises(__a ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] ) # Out indices must be a list or tuple with self.assertRaises(__a ): verify_out_features_out_indices(__a , 0 , ['a', 'b'] ) # Out indices must be a subset of stage names with self.assertRaises(__a ): verify_out_features_out_indices(__a , (0, 1) , ['a'] ) # Out features and out indices must be the same length with self.assertRaises(__a ): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] ) # Out features should match out indices with self.assertRaises(__a ): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] ) # Out features and out indices should be in order with self.assertRaises(__a ): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] ) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = BackboneMixin() __a : Optional[Any] = ['a', 'b', 'c'] __a : Optional[Any] = ['a', 'c'] __a : Tuple = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly __a : Optional[Any] = ['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b'] ) self.assertEqual(backbone.out_indices , [0, 1] ) __a : List[Any] = [-3, -1] self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [-3, -1] )
294
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __UpperCamelCase ( lowerCAmelCase_ ): A_ = None A_ = None A_ = None A_ = None class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ): '''simple docstring''' super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) __a : Any = project_dim __a : Optional[Any] = pooler_fn __a : int = learn_encoder __a : str = use_attention_mask class __UpperCamelCase ( lowerCAmelCase_ ): A_ = [r"pooler", r"logit_scale"] A_ = [r"position_ids", r"predictions.decoder.bias"] A_ = "roberta" A_ = RobertaSeriesConfig def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Optional[Any] = XLMRobertaModel(__a ) __a : str = nn.Linear(config.hidden_size , config.project_dim ) __a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a ) if self.has_pre_transformation: __a : int = nn.Linear(config.hidden_size , config.project_dim ) __a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Tuple = self.base_model( input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , ) if self.has_pre_transformation: __a : Optional[Any] = outputs['hidden_states'][-2] __a : Optional[int] = self.pre_LN(__a ) __a : Union[str, Any] = self.transformation_pre(__a ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __a : Optional[Any] = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
294
1
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class __UpperCamelCase ( lowerCAmelCase_ ): @staticmethod def __UpperCAmelCase ( __a ): '''simple docstring''' __a : Dict = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' , type=__a , default=__a , help='Path to location to store the models' ) download_parser.add_argument( '--force' , action='store_true' , help='Force the model to be downloaded even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , ) download_parser.add_argument('model' , type=__a , help='Name of the model to download' ) download_parser.set_defaults(func=__a ) def __init__( self , __a , __a , __a , __a ): '''simple docstring''' __a : List[str] = model __a : List[str] = cache __a : Any = force __a : Optional[Any] = trust_remote_code def __UpperCAmelCase ( self ): '''simple docstring''' from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
294
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase : Union[str, Any] = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys __lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
1
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowercase : Optional[int] = logging.get_logger(__name__) __lowercase : Optional[int] = {'vocab_file': 'vocab.json'} __lowercase : Union[str, Any] = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } __lowercase : Union[str, Any] = {'mgp-str': 27} class __UpperCamelCase ( lowerCAmelCase_ ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __a , __a="[GO]" , __a="[GO]" , __a="[s]" , __a="[GO]" , **__a ): '''simple docstring''' super().__init__( unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a , ) with open(__a , encoding='utf-8' ) as vocab_handle: __a : Union[str, Any] = json.load(__a ) __a : Any = {v: k for k, v in self.vocab.items()} @property def __UpperCAmelCase ( self ): '''simple docstring''' return len(self.vocab ) def __UpperCAmelCase ( self ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' __a : str = [] for s in text: char_tokens.extend(__a ) return char_tokens def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.vocab.get(__a , self.vocab.get(self.unk_token ) ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.decoder.get(__a ) def __UpperCAmelCase ( self , __a , __a = None ): '''simple docstring''' if not os.path.isdir(__a ): logger.error('Vocabulary path ({}) should be a directory'.format(__a ) ) return __a : List[Any] = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) with open(__a , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '\n' ) return (vocab_file,)
294
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
294
1
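The apply_tf_padding helper in the MobileNetV1 sample above reproduces TensorFlow's 'SAME' padding, which pads asymmetrically whenever the input size is not a multiple of the stride. A minimal sketch of the same arithmetic, assuming NCHW tensors (the name tf_same_pad is mine, not from the sample):

import torch
import torch.nn.functional as F

def tf_same_pad(features: torch.Tensor, kernel: int, stride: int) -> torch.Tensor:
    # total padding so that out = ceil(in / stride) for a convolution with this kernel/stride
    in_h, in_w = features.shape[-2:]
    pad_h = max(kernel - stride, 0) if in_h % stride == 0 else max(kernel - in_h % stride, 0)
    pad_w = max(kernel - stride, 0) if in_w % stride == 0 else max(kernel - in_w % stride, 0)
    # TensorFlow puts the extra pixel on the bottom/right when the total is odd
    return F.pad(features, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))

print(tf_same_pad(torch.randn(1, 3, 7, 7), kernel=3, stride=2).shape)  # torch.Size([1, 3, 9, 9])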
'''simple docstring''' import math def lowerCamelCase (_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ): return math.pow(_SCREAMING_SNAKE_CASE , 2 ) - a def lowerCamelCase (_SCREAMING_SNAKE_CASE : float ): return 2 * x def lowerCamelCase (_SCREAMING_SNAKE_CASE : float ): __a : Tuple = 2.0 while start <= a: __a : Optional[Any] = math.pow(_SCREAMING_SNAKE_CASE , 2 ) return start def lowerCamelCase (_SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 9_999 , _SCREAMING_SNAKE_CASE : float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ): if a < 0: raise ValueError('math domain error' ) __a : Optional[Any] = get_initial_point(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ): __a : str = value __a : List[Any] = value - fx(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / fx_derivative(_SCREAMING_SNAKE_CASE ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
294
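For reference, a de-obfuscated sketch of the Newton iteration in the square-root sample above, solving f(x) = x**2 - a (the identifier newton_sqrt is mine; the logic mirrors the sample):

import math

def newton_sqrt(a: float, max_iter: int = 9_999, tolerance: float = 1e-14) -> float:
    if a < 0:
        raise ValueError('math domain error')
    x = 2.0
    while x <= a:  # crude starting point: repeatedly square until we overshoot a
        x = math.pow(x, 2)
    for _ in range(max_iter):
        prev = x
        x = x - (x * x - a) / (2 * x)  # x_{n+1} = x_n - f(x_n) / f'(x_n)
        if abs(prev - x) < tolerance:
            break
    return x

print(newton_sqrt(2))  # 1.4142135623730951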
'''simple docstring''' import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __lowercase : str = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ): __a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse! __a : Optional[Any] = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } __a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ) __a : Dict = BeautifulSoup(html.text , 'html.parser' ) __a : List[str] = ''.join( re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) __a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE ) __a : List[str] = json.loads(_SCREAMING_SNAKE_CASE ) __a : List[Any] = re.findall( r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , ) if not matched_google_image_data: return 0 __a : Tuple = re.sub( r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , ) __a : Optional[Any] = re.findall( r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , ) for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ): if index >= max_images: return index __a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode( 'unicode-escape' ) __a : Dict = urllib.request.build_opener() __a : Union[str, Any] = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(_SCREAMING_SNAKE_CASE ) __a : List[Any] = F"""query_{query.replace(" " , "_" )}""" if not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) urllib.request.urlretrieve( # noqa: S310 _SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: __lowercase : Optional[int] = download_images_from_google_query(sys.argv[1]) print(f'''{image_count} images were downloaded to disk.''') except IndexError: print('Please provide a search term.') raise
294
1
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=lowerCAmelCase_ ) class __UpperCamelCase ( lowerCAmelCase_ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization A_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) A_ = Features({"text": Value("string" )} ) A_ = Features({"labels": ClassLabel} ) A_ = "text" A_ = "labels" def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.label_column not in features: raise ValueError(f"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , __a ): raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" ) __a : Dict = copy.deepcopy(self ) __a : Optional[Any] = self.label_schema.copy() __a : int = features[self.label_column] __a : int = label_schema return task_template @property def __UpperCAmelCase ( self ): '''simple docstring''' return { self.text_column: "text", self.label_column: "labels", }
294
'''simple docstring''' import os def lowerCamelCase (): with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + '/p022_names.txt' ) as file: __a : List[Any] = str(file.readlines()[0] ) __a : str = names.replace('"' , '' ).split(',' ) names.sort() __a : Union[str, Any] = 0 __a : Tuple = 0 for i, name in enumerate(_SCREAMING_SNAKE_CASE ): for letter in name: name_score += ord(_SCREAMING_SNAKE_CASE ) - 64 total_score += (i + 1) * name_score __a : Any = 0 return total_score if __name__ == "__main__": print(solution())
294
1
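The names-scores sample above is Project Euler problem 22: sort the names, value each letter by its position in the alphabet, and weight each name's letter sum by its 1-based rank. A self-contained sketch without the file I/O (the two sample names are illustrative, not the real puzzle input):

def total_name_score(names: list) -> int:
    total = 0
    for position, name in enumerate(sorted(names), start=1):
        letter_sum = sum(ord(ch) - 64 for ch in name)  # 'A' -> 1, 'B' -> 2, ...
        total += position * letter_sum
    return total

print(total_name_score(['MARY', 'COLIN']))  # 1 * 53 (COLIN) + 2 * 57 (MARY) = 167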
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def lowerCamelCase (): __a : Dict = HfArgumentParser(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = parser.parse_args_into_dataclasses()[0] __a : Tuple = TensorFlowBenchmark(args=_SCREAMING_SNAKE_CASE ) try: __a : List[str] = parser.parse_args_into_dataclasses()[0] except ValueError as e: __a : List[Any] = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' __a : Optional[int] = ' '.join(str(_SCREAMING_SNAKE_CASE ).split(' ' )[:-1] ) __a : Optional[int] = '' __a : List[Any] = eval(str(_SCREAMING_SNAKE_CASE ).split(' ' )[-1] ) __a : Tuple = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __a : Optional[int] = full_error_msg + begin_error_msg + str(_SCREAMING_SNAKE_CASE ) raise ValueError(_SCREAMING_SNAKE_CASE ) benchmark.run() if __name__ == "__main__": main()
294
'''simple docstring''' __lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __lowercase : List[str] = ['a', 'b', 'c', 'd', 'e'] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ): __a : Any = start # add current to visited visited.append(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: __a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # if all neighbors visited add current to sort sort.append(_SCREAMING_SNAKE_CASE ) # if all vertices haven't been visited select a new one to visit if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): for vertice in vertices: if vertice not in visited: __a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # return sort return sort if __name__ == "__main__": __lowercase : Union[str, Any] = topological_sort('a', [], []) print(sort)
294
1
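A cleaner sketch of the depth-first topological sort above, with the mangled names restored (naming is mine; the recursion matches the sample, minus its fallback loop over disconnected vertices):

def topological_sort(start, visited, order, edges):
    # DFS post-order: a vertex is appended only after all of its neighbours
    visited.append(start)
    for neighbor in edges[start]:
        if neighbor not in visited:
            order = topological_sort(neighbor, visited, order, edges)
    order.append(start)
    return order

edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
print(topological_sort('a', [], [], edges))  # ['c', 'd', 'e', 'b', 'a']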
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowerCamelCase (): __a : Tuple = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' __a : Dict = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' ) return image def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : Any = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def lowerCamelCase (_SCREAMING_SNAKE_CASE : 
Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ): __a : Optional[Any] = dct.pop(_SCREAMING_SNAKE_CASE ) __a : List[str] = val def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __a : Optional[int] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) __a : str = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict __a : Tuple = torch.cat((q_bias, torch.zeros_like(_SCREAMING_SNAKE_CASE , requires_grad=_SCREAMING_SNAKE_CASE ), v_bias) ) __a : Union[str, Any] = qkv_bias def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): __a : int = 364 if 'coco' in model_name else 224 __a : Union[str, Any] = InstructBlipVisionConfig(image_size=_SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: __a : List[str] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __a : Dict = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: __a : List[Any] = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: __a : Tuple = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32_001 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 __a : Dict = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() __a : Optional[Any] = InstructBlipConfig(vision_config=_SCREAMING_SNAKE_CASE , text_config=_SCREAMING_SNAKE_CASE , qformer_config=_SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Any=False ): __a : int = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: __a : List[str] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) __a : int = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) __a , __a : Optional[int] = get_blipa_config(_SCREAMING_SNAKE_CASE ) __a : Tuple = InstructBlipForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval() __a : Tuple = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } __a , __a : str = model_name_to_original[model_name] # load original model print('Loading original model...' 
) __a : Any = 'cuda:1' if torch.cuda.is_available() else 'cpu' __a : Union[str, Any] = 'cuda:2' if torch.cuda.is_available() else 'cpu' __a , __a , __a : str = load_model_and_preprocess( name=_SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , is_eval=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) original_model.eval() print('Done!' ) # update state dict keys __a : int = original_model.state_dict() __a : int = create_rename_keys(_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __a : int = state_dict.pop(_SCREAMING_SNAKE_CASE ) if key.startswith('Qformer.bert' ): __a : List[Any] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: __a : Union[str, Any] = key.replace('self' , 'attention' ) if "llm_proj" in key: __a : List[str] = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: __a : Dict = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): __a : Dict = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): __a : Dict = key.replace('t5' , 'language' ) __a : Any = val # read in qv biases read_in_q_v_bias(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) __a : str = load_demo_image() __a : str = 'What is unusual about this image?' # create processor __a : int = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE ) __a : List[Any] = InstructBlipProcessor( image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , qformer_tokenizer=_SCREAMING_SNAKE_CASE , ) __a : Dict = processor(images=_SCREAMING_SNAKE_CASE , text=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values __a : List[Any] = vis_processors['eval'](_SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(_SCREAMING_SNAKE_CASE ) __a : int = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _SCREAMING_SNAKE_CASE ) original_model.to(_SCREAMING_SNAKE_CASE ) hf_model.to(_SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "vicuna" in model_name: __a : Optional[Any] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits __a : Any = hf_model(**_SCREAMING_SNAKE_CASE ).logits else: __a : Tuple = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits __a : int = tokenizer('\n' , return_tensors='pt' ).input_ids.to(_SCREAMING_SNAKE_CASE ) __a : List[str] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) __a : Optional[int] = hf_model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape __a : Dict = 1e-4 if 'vicuna' in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) print('Looks ok!' ) print('Generating with original model...' 
) __a : Dict = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) __a : Optional[int] = hf_model.generate( **_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? __a : List[str] = 2 print('Original generation:' , _SCREAMING_SNAKE_CASE ) __a : int = processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) __a : Tuple = [text.strip() for text in output_text] print('HF generation:' , _SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(F"""Salesforce/{model_name}""" ) hf_model.push_to_hub(F"""Salesforce/{model_name}""" ) if __name__ == "__main__": __lowercase : List[Any] = argparse.ArgumentParser() __lowercase : Union[str, Any] = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) __lowercase : List[Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
294
'''simple docstring''' def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
294
1
'''simple docstring''' import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __UpperCamelCase ( nn.Module ): def __init__( self ): '''simple docstring''' super().__init__() __a : List[str] = nn.Linear(3 , 4 ) __a : Optional[int] = nn.BatchNormad(4 ) __a : int = nn.Linear(4 , 5 ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a , *__a , **__a ): '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class __UpperCamelCase ( lowerCAmelCase_ ): def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' return output + 1 class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = ModelForTest() __a : Any = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , '_hf_hook' ) ) self.assertFalse(hasattr(__a , '_old_forward' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = ModelForTest() __a : int = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , '_hf_hook' ) ) self.assertFalse(hasattr(__a , '_old_forward' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = ModelForTest() __a : Optional[Any] = torch.randn(2 , 3 ) __a : str = test_model(x + 1 ) __a : List[Any] = test_model(x + 2 ) __a : Optional[int] = PreForwardHook() add_hook_to_module(__a , __a ) __a : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __a : Optional[int] = PreForwardHook() add_hook_to_module(__a , __a ) __a : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __a : Optional[Any] = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) __a : List[str] = test_model(__a ) assert torch.allclose(__a , __a , atol=1E-5 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ModelForTest() __a : int = torch.randn(2 , 3 ) __a : List[str] = test_model(__a ) __a : Dict = PostForwardHook() add_hook_to_module(__a , __a ) __a : Union[str, Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __a : Any = PostForwardHook() add_hook_to_module(__a , __a 
) __a : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __a : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) __a : Optional[Any] = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1E-5 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ModelForTest() __a : List[Any] = torch.randn(2 , 3 ) __a : Union[str, Any] = test_model(__a ) __a : Union[str, Any] = PostForwardHook() add_hook_to_module(__a , __a ) __a : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __a : Dict = True __a : Tuple = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __a : Union[str, Any] = torch.randn(2 , 3 ) __a : Any = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) __a : List[str] = torch.randn(2 , 3 ).to(0 ) __a : Any = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __a : List[Any] = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __a : List[Any] = torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) __a : Optional[int] = torch.randn(2 , 3 ) __a : Tuple = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __a : List[Any] = { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __a : Tuple = torch.randn(2 , 3 ) __a : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __a : str = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __a : Optional[int] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) __a : int = torch.randn(2 , 3 ) __a : List[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __a : int = torch.randn(2 , 3 ) __a : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __a : Optional[int] = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __a : Union[str, Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) __a : Tuple = torch.randn(2 , 3 ) __a : List[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __a : Optional[Any] = torch.randn(2 , 3 ) __a : List[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
294
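The accelerate test sample above exercises hooks that rewrite a module's inputs before forward and its outputs after. The same behaviour can be sketched with plain PyTorch forward hooks (an illustration independent of accelerate, not its implementation):

import torch
from torch import nn

model = nn.Linear(3, 2)
x = torch.randn(2, 3)
with torch.no_grad():
    plain = model(x + 1) + 1  # what the hooked call should reproduce
    # a pre-hook returns replacement positional args; a post-hook returns a replacement output
    h_pre = model.register_forward_pre_hook(lambda mod, args: (args[0] + 1,))
    h_post = model.register_forward_hook(lambda mod, args, out: out + 1)
    hooked = model(x)
    h_pre.remove()  # removing the hooks restores the original forward
    h_post.remove()
assert torch.allclose(plain, hooked)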
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase : Tuple = { 'configuration_distilbert': [ 'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DistilBertConfig', 'DistilBertOnnxConfig', ], 'tokenization_distilbert': ['DistilBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : str = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : Any = [ 'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase : List[str] = [ 'FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, 
module_spec=__spec__)
294
1
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a , __a , __a = None , ): '''simple docstring''' super().__init__() self.register_modules(transformer=__a , vae=__a , scheduler=__a ) # create a imagenet -> id dictionary for easier use __a : List[Any] = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(',' ): __a : str = int(__a ) __a : Union[str, Any] = dict(sorted(self.labels.items() ) ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if not isinstance(__a , __a ): __a : Optional[Any] = list(__a ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , __a , __a = 4.0 , __a = None , __a = 50 , __a = "pil" , __a = True , ): '''simple docstring''' __a : Optional[int] = len(__a ) __a : str = self.transformer.config.sample_size __a : Optional[int] = self.transformer.config.in_channels __a : Dict = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__a , device=self.device , dtype=self.transformer.dtype , ) __a : Optional[int] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents __a : Optional[Any] = torch.tensor(__a , device=self.device ).reshape(-1 ) __a : Dict = torch.tensor([1000] * batch_size , device=self.device ) __a : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(__a ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: __a : Tuple = latent_model_input[: len(__a ) // 2] __a : int = torch.cat([half, half] , dim=0 ) __a : Any = self.scheduler.scale_model_input(__a , __a ) __a : Union[str, Any] = t if not torch.is_tensor(__a ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) __a : List[str] = latent_model_input.device.type == 'mps' if isinstance(__a , __a ): __a : Tuple = torch.floataa if is_mps else torch.floataa else: __a : Optional[int] = torch.intaa if is_mps else torch.intaa __a : List[str] = torch.tensor([timesteps] , dtype=__a , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: __a : Union[str, Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a : List[Any] = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output __a : List[str] = self.transformer( __a , timestep=__a , class_labels=__a ).sample # perform guidance if guidance_scale > 1: __a , __a : int = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] __a , __a : Union[str, Any] = torch.split(__a , len(__a ) // 2 , dim=0 ) __a : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps) __a : Tuple = torch.cat([half_eps, half_eps] , dim=0 ) __a : List[str] = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: __a , __a : Optional[Any] = torch.split(__a , __a , dim=1 ) else: __a : Optional[int] = noise_pred # compute previous image: x_t -> x_t-1 __a : List[Any] = self.scheduler.step(__a , __a , __a ).prev_sample if guidance_scale > 1: __a , __a : List[Any] = latent_model_input.chunk(2 , dim=0 ) else: __a : Union[str, Any] = latent_model_input __a : int = 1 / self.vae.config.scaling_factor * latents __a : str = self.vae.decode(__a ).sample __a : Union[str, Any] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __a : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __a : List[str] = self.numpy_to_pil(__a ) if not return_dict: return (samples,) return ImagePipelineOutput(images=__a )
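# Hedged usage sketch for the class-conditional DiT pipeline defined above,
# assuming the public facebook/DiT-XL-2-256 checkpoint and a CUDA device;
# get_label_ids is the ImageNet-name-to-id lookup implemented in the class.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256', torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to('cuda')

class_ids = pipe.get_label_ids(['white shark', 'umbrella'])  # names -> ImageNet ids
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
images[0].save('dit_sample.png')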
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = 'laion/clap-htsat-unfused' __a : Optional[Any] = tempfile.mkdtemp() def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return RobertaTokenizer.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self , **__a ): '''simple docstring''' return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a ) def __UpperCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_tokenizer() __a : List[str] = self.get_feature_extractor() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) processor.save_pretrained(self.tmpdirname ) __a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) __a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 ) __a : Tuple = ClapProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : int = self.get_tokenizer() __a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : int = floats_list((3, 1000) ) __a : str = feature_extractor(__a , return_tensors='np' ) __a : int = processor(audios=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.get_feature_extractor() __a : Any = self.get_tokenizer() __a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Union[str, Any] = 'This is a test string' __a : Union[str, Any] = processor(text=__a ) __a : Tuple = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_feature_extractor() __a : str = self.get_tokenizer() __a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a ) __a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __a : Optional[int] = processor.batch_decode(__a ) __a : Optional[Any] = tokenizer.batch_decode(__a ) 
self.assertListEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_feature_extractor() __a : Optional[int] = self.get_tokenizer() __a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
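# For orientation, a hedged sketch of the round-trip these tests exercise,
# using the laion/clap-htsat-unfused checkpoint from setUp; the audio array
# here is a made-up one-second clip at CLAP's 48 kHz sampling rate.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
audio = np.zeros(48_000, dtype=np.float32)  # placeholder waveform
inputs = processor(text=['a dog barking'], audios=audio, sampling_rate=48_000, return_tensors='pt')
print(sorted(inputs.keys()))  # text keys from the tokenizer, input_features from the feature extractor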
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ): '''simple docstring''' __a : int = parent __a : Union[str, Any] = batch_size __a : Optional[int] = seq_length __a : List[str] = is_training __a : Any = use_input_mask __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : List[str] = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : str = num_attention_heads __a : Optional[int] = intermediate_size __a : Tuple = hidden_act __a : Union[str, Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : Dict = type_vocab_size __a : Any = type_sequence_label_size __a : Dict = initializer_range __a : Optional[Any] = num_labels __a : Optional[Any] = num_choices __a : Union[str, Any] = relative_attention __a : List[str] = position_biased_input __a : List[Any] = pos_att_type __a : Tuple = scope def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = None if self.use_input_mask: __a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __a : Any = None if self.use_token_type_ids: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[int] = None __a : int = None __a : Dict = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __a : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): '''simple docstring''' return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def 
__UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = DebertaVaModel(config=__a ) model.to(__a ) model.eval() __a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0] __a : str = model(__a , token_type_ids=__a )[0] __a : Optional[int] = model(__a )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : int = DebertaVaForMaskedLM(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[Any] = self.num_labels __a : List[Any] = DebertaVaForSequenceClassification(__a ) model.to(__a ) model.eval() __a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__a ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = DebertaVaForTokenClassification(config=__a ) model.to(__a ) model.eval() __a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[str] = DebertaVaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __a : str = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Optional[int] = DebertaVaForMultipleChoice(config=__a ) model.to(__a ) model.eval() __a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : int = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = config_and_inputs __a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) A_ = ( { "feature-extraction": DebertaVaModel, "fill-mask": DebertaVaForMaskedLM, "question-answering": DebertaVaForQuestionAnswering, "text-classification": DebertaVaForSequenceClassification, 
"token-classification": DebertaVaForTokenClassification, "zero-shot": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) A_ = True A_ = False A_ = False A_ = False A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = DebertaVaModelTester(self ) __a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*__a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : str = DebertaVaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @unittest.skip(reason='Model not available yet' ) def __UpperCAmelCase ( self ): '''simple docstring''' pass @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' ) __a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) __a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __a : int = model(__a , attention_mask=__a )[0] # compare the actual values for a slice. __a : str = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
'''simple docstring''' import os import sys import unittest __lowercase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers') class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' __a : Union[str, Any] = find_backend(' if not is_torch_available():' ) self.assertEqual(__a , 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __a : str = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(__a , 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __a : Optional[int] = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(__a , 'torch_and_transformers_and_onnx' ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , __a ) self.assertIn('torch_and_transformers' , __a ) self.assertIn('flax_and_transformers' , __a ) self.assertIn('torch_and_transformers_and_onnx' , __a ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] ) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(__a , '\nCONSTANT = None\n' ) __a : Any = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( __a , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) __a : List[Any] = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' __a : Union[str, Any] = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 
["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' __a : Optional[int] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , __a )
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ): if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ): return False return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ): __a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[Any] = model __a : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Union[str, Any] = model.module if not keep_fpaa_wrapper: __a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' ) __a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ): __a : Any = forward.__wrapped__ if forward == original_forward: break __a : str = forward if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ): convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE ) if is_compiled: __a : List[str] = model __a : Optional[int] = compiled_model return model def lowerCamelCase (): PartialState().wait_for_everyone() def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ): if PartialState().distributed_type == DistributedType.TPU: xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @contextmanager def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ): for key, value in kwargs.items(): __a : Optional[int] = str(_SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ): if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): __a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ): return obj.__qualname__ if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ): return obj.__name__ return str(_SCREAMING_SNAKE_CASE ) def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ): for key, value in source.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} ) merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: __a : Tuple = value return destination def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ): if port is None: __a : List[str] = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
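# One helper above is a context manager that temporarily sets environment
# variables (published in accelerate as patch_environment). A hedged usage
# sketch; note this version deletes the keys on exit rather than restoring
# any previous values, matching the code above.
import os
from accelerate.utils import patch_environment

with patch_environment(MASTER_ADDR='localhost', MASTER_PORT='29501'):
    assert os.environ['MASTER_PORT'] == '29501'
assert 'MASTER_PORT' not in os.environ  # assuming it was unset beforehand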
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __UpperCamelCase : def __init__( self , __a , __a=99 , __a=13 , __a=16 , __a=7 , __a=True , __a=True , __a=True , __a=False , __a=True , __a=2 , __a=32 , __a=4 , __a=4 , __a=30 , __a=0 , __a=1 , __a=2 , __a=None , ): '''simple docstring''' __a : Optional[Any] = parent __a : Optional[int] = batch_size __a : List[Any] = decoder_seq_length # For common tests __a : Tuple = self.decoder_seq_length __a : Any = is_training __a : Dict = use_attention_mask __a : Any = use_labels __a : Tuple = vocab_size __a : List[str] = d_model __a : Optional[int] = d_model __a : int = decoder_layers __a : int = decoder_layers __a : Optional[Any] = decoder_ffn_dim __a : Union[str, Any] = decoder_attention_heads __a : Dict = decoder_attention_heads __a : Optional[Any] = eos_token_id __a : Optional[Any] = bos_token_id __a : str = pad_token_id __a : Tuple = decoder_start_token_id __a : Tuple = use_cache __a : Optional[Any] = max_position_embeddings __a : List[str] = None __a : int = decoder_seq_length __a : Dict = 2 __a : Dict = 1 def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __a : Any = None if self.use_attention_mask: __a : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __a : int = None if self.use_labels: __a : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __a : Any = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __UpperCAmelCase ( self , __a , __a , __a , __a , ): '''simple docstring''' __a : Optional[Any] = True __a : str = TrOCRDecoder(config=__a ).to(__a ).eval() __a : List[Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __a : Any = model(__a , use_cache=__a ) __a : List[Any] = model(__a ) __a : Optional[Any] = model(__a , use_cache=__a ) self.parent.assertTrue(len(__a ) == len(__a ) ) self.parent.assertTrue(len(__a ) == len(__a ) + 1 ) __a : Tuple = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids __a : Any = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and __a : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __a : Tuple = model(__a )['last_hidden_state'] __a : Dict = model(__a , past_key_values=__a )['last_hidden_state'] # select random slice __a : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a : Union[str, Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __a : str = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__a , __a , atol=1E-3 ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.prepare_config_and_inputs() __a , __a , __a , __a : int = config_and_inputs __a : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () A_ = (TrOCRForCausalLM,) if is_torch_available() else () A_ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} A_ = True A_ = False def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=__a ) __a : str = ConfigTester(self , config_class=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' pass def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__a ) def __UpperCAmelCase ( self ): '''simple docstring''' return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def __UpperCAmelCase ( self ): '''simple docstring''' pass
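# The decoder under test is one half of the TrOCR encoder-decoder OCR model.
# A hedged end-to-end sketch with the public handwritten checkpoint;
# 'line.png' stands in for any single-text-line image you have locally.
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')

pixel_values = processor(images=Image.open('line.png').convert('RGB'), return_tensors='pt').pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])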
'''simple docstring'''
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
'''simple docstring'''
from typing import Any


def lowerCamelCase(input_list: list) -> list[Any]:
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
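# Quick hedged checks for the mode helper above (function name kept from the
# file); note that ties return every mode, sorted.
assert lowerCamelCase([1, 2, 2, 3]) == [2]
assert lowerCamelCase([1, 1, 2, 2, 3]) == [1, 2]
assert lowerCamelCase([]) == []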
'''simple docstring'''
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def lowerCamelCase(node: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError('Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float('inf'), float('inf'))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
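# Hedged sanity checks for the validator above, using the TreeNode dataclass
# as reconstructed in this file.
valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))  # 5.0 on the left breaks the bound

assert lowerCamelCase(valid) is True
assert lowerCamelCase(invalid) is False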
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __lowercase : str = logging.get_logger(__name__) # General docstring __lowercase : List[str] = 'MobileNetV1Config' # Base docstring __lowercase : Tuple = 'google/mobilenet_v1_1.0_224' __lowercase : List[Any] = [1, 10_24, 7, 7] # Image classification docstring __lowercase : int = 'google/mobilenet_v1_1.0_224' __lowercase : Any = 'tabby, tabby cat' __lowercase : Dict = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ): __a : Dict = {} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Optional[Any] = model.mobilenet_va else: __a : List[Any] = model __a : Dict = 'MobilenetV1/Conv2d_0/' __a : Dict = backbone.conv_stem.convolution.weight __a : Optional[Any] = backbone.conv_stem.normalization.bias __a : int = backbone.conv_stem.normalization.weight __a : int = backbone.conv_stem.normalization.running_mean __a : Tuple = backbone.conv_stem.normalization.running_var for i in range(13 ): __a : int = i + 1 __a : Dict = i * 2 __a : Dict = backbone.layer[pt_index] __a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __a : Union[str, Any] = pointer.convolution.weight __a : Optional[Any] = pointer.normalization.bias __a : Union[str, Any] = pointer.normalization.weight __a : List[Any] = pointer.normalization.running_mean __a : Tuple = pointer.normalization.running_var __a : List[str] = backbone.layer[pt_index + 1] __a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __a : Optional[int] = pointer.convolution.weight __a : List[str] = pointer.normalization.bias __a : Dict = pointer.normalization.weight __a : Dict = pointer.normalization.running_mean __a : Optional[int] = pointer.normalization.running_var if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __a : Optional[int] = model.classifier.weight __a : List[Any] = model.classifier.bias return tf_to_pt_map def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) __a : Optional[int] = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""" ) __a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __a : Optional[Any] = array # Build TF to PyTorch weights loading map __a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""" ) if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""" ) continue __a : Union[str, Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __a : Union[str, Any] = array.squeeze().transpose() else: __a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" ) logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" ) __a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE ) tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE ) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" ) return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ): __a , __a : Any = features.shape[-2:] __a , __a : int = conv_layer.stride __a , __a : Any = conv_layer.kernel_size if in_height % stride_height == 0: __a : int = max(kernel_height - stride_height , 0 ) else: __a : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __a : Any = max(kernel_width - stride_width , 0 ) else: __a : str = max(kernel_width - (in_width % stride_width) , 0 ) __a : int = pad_along_width // 2 __a : Dict = pad_along_width - pad_left __a : List[str] = pad_along_height // 2 __a : Union[str, Any] = pad_along_height - pad_top __a : str = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 ) class __UpperCamelCase ( nn.Module ): def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ): '''simple docstring''' super().__init__() __a : Optional[int] = config if in_channels % groups != 0: raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __a : Union[str, Any] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , ) if use_normalization: __a : List[str] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __a : Tuple = None if use_activation: if isinstance(__a , __a ): __a : Tuple = ACTaFN[use_activation] elif 
isinstance(config.hidden_act , __a ): __a : Union[str, Any] = ACTaFN[config.hidden_act] else: __a : Dict = config.hidden_act else: __a : List[Any] = None def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config.tf_padding: __a : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __a : Union[str, Any] = self.convolution(__a ) if self.normalization is not None: __a : str = self.normalization(__a ) if self.activation is not None: __a : Optional[int] = self.activation(__a ) return features class __UpperCamelCase ( lowerCAmelCase_ ): A_ = MobileNetVaConfig A_ = load_tf_weights_in_mobilenet_va A_ = "mobilenet_v1" A_ = "pixel_values" A_ = False def __UpperCAmelCase ( self , __a ): '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' __lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a , __a = True ): '''simple docstring''' super().__init__(__a ) __a : Optional[int] = config __a : str = 32 __a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) __a : Union[str, Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __a : Any = nn.ModuleList() for i in range(13 ): __a : Union[str, Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __UpperCAmelCase ( self , __a ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __a : Union[str, Any] = self.conv_stem(__a ) __a : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __a : List[str] = layer_module(__a ) if output_hidden_states: __a : List[Any] = all_hidden_states + (hidden_states,) __a : str = hidden_states if self.pooler is not None: __a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __a : int = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase_ , ) class __UpperCamelCase ( lowerCAmelCase_ ): def __init__( self , __a ): '''simple docstring''' super().__init__(__a ) __a : Tuple = config.num_labels __a : Tuple = MobileNetVaModel(__a ) __a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ): '''simple docstring''' __a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict __a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __a : List[str] = outputs.pooler_output if return_dict else outputs[1] __a : int = self.classifier(self.dropout(__a ) ) __a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __a : str = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __a : int = 'single_label_classification' else: __a : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __a : Optional[Any] = MSELoss() if self.num_labels == 1: __a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __a : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __a : List[str] = CrossEntropyLoss() __a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __a : Tuple = BCEWithLogitsLoss() __a : Optional[int] = loss_fct(__a , __a ) if not return_dict: __a : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
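# A hedged inference sketch for the classification head above, using the
# google/mobilenet_v1_1.0_224 checkpoint named in the docstrings; 'cat.jpg'
# stands in for any local image.
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')

inputs = processor(images=Image.open('cat.jpg'), return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])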
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)