| Column | Type | Range |
| --- | --- | --- |
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
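Each preview row below pairs a style-transformed `code` sample with a `style_context` sample, plus the two integer style ids and a binary `label`. A minimal sketch of how a dataset with this schema could be loaded for inspection (the repo id `user/code-style-dataset` is a placeholder, not the dataset's actual identifier):

```python
from datasets import load_dataset  # pip install datasets

# Placeholder repo id: substitute the dataset's actual path on the Hub.
ds = load_dataset("user/code-style-dataset", split="train")

row = ds[0]
# `code` and `style_context` hold Python source strings; the two
# *_codestyle columns are integer style ids and `label` is binary.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample
```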
Row 1

code:

```python
"""simple docstring"""
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
    "b0": efficientnet.EfficientNetBa,
    "b1": efficientnet.EfficientNetBa,
    "b2": efficientnet.EfficientNetBa,
    "b3": efficientnet.EfficientNetBa,
    "b4": efficientnet.EfficientNetBa,
    "b5": efficientnet.EfficientNetBa,
    "b6": efficientnet.EfficientNetBa,
    "b7": efficientnet.EfficientNetBa,
}
__SCREAMING_SNAKE_CASE ={
    "b0": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.0, "image_size": 224, "dropout_rate": 0.2, "dw_padding": []},
    "b1": {"hidden_dim": 1280, "width_coef": 1.0, "depth_coef": 1.1, "image_size": 240, "dropout_rate": 0.2, "dw_padding": [16]},
    "b2": {"hidden_dim": 1408, "width_coef": 1.1, "depth_coef": 1.2, "image_size": 260, "dropout_rate": 0.3, "dw_padding": [5, 8, 16]},
    "b3": {"hidden_dim": 1536, "width_coef": 1.2, "depth_coef": 1.4, "image_size": 300, "dropout_rate": 0.3, "dw_padding": [5, 18]},
    "b4": {"hidden_dim": 1792, "width_coef": 1.4, "depth_coef": 1.8, "image_size": 380, "dropout_rate": 0.4, "dw_padding": [6]},
    "b5": {"hidden_dim": 2048, "width_coef": 1.6, "depth_coef": 2.2, "image_size": 456, "dropout_rate": 0.4, "dw_padding": [13, 27]},
    "b6": {"hidden_dim": 2304, "width_coef": 1.8, "depth_coef": 2.6, "image_size": 528, "dropout_rate": 0.5, "dw_padding": [31]},
    "b7": {"hidden_dim": 2560, "width_coef": 2.0, "depth_coef": 3.1, "image_size": 600, "dropout_rate": 0.5, "dw_padding": [18]},
}


def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ) -> int:
    lowercase_ : str = EfficientNetConfig()
    lowercase_ : Optional[int] = CONFIG_MAP[model_name]['hidden_dim']
    lowercase_ : Any = CONFIG_MAP[model_name]['width_coef']
    lowercase_ : Optional[int] = CONFIG_MAP[model_name]['depth_coef']
    lowercase_ : List[Any] = CONFIG_MAP[model_name]['image_size']
    lowercase_ : List[str] = CONFIG_MAP[model_name]['dropout_rate']
    lowercase_ : List[Any] = CONFIG_MAP[model_name]['dw_padding']
    lowercase_ : int = 'huggingface/label-files'
    lowercase_ : Any = 'imagenet-1k-id2label.json'
    lowercase_ : Union[str, Any] = 10_00
    lowercase_ : str = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    lowercase_ : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    lowercase_ : Dict = idalabel
    lowercase_ : Dict = {v: k for k, v in idalabel.items()}
    return config


def lowercase__( ) -> Any:
    lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    lowercase_ : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
    return im


def lowercase__( __SCREAMING_SNAKE_CASE : int ) -> int:
    lowercase_ : Tuple = CONFIG_MAP[model_name]['image_size']
    lowercase_ : Any = EfficientNetImageProcessor(
        size={'height': size, 'width': size} ,
        image_mean=[0.485, 0.456, 0.406] ,
        image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] ,
        do_center_crop=__SCREAMING_SNAKE_CASE ,
    )
    return preprocessor


def lowercase__( __SCREAMING_SNAKE_CASE : int ) -> Any:
    lowercase_ : List[Any] = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    lowercase_ : str = sorted(set(__SCREAMING_SNAKE_CASE ) )
    lowercase_ : int = len(__SCREAMING_SNAKE_CASE )
    lowercase_ : Tuple = {b: str(__SCREAMING_SNAKE_CASE ) for b, i in zip(__SCREAMING_SNAKE_CASE , range(__SCREAMING_SNAKE_CASE ) )}

    lowercase_ : str = []
    rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
    rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
    rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
    rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
    rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
    for b in block_names:
        lowercase_ : Any = block_name_mapping[b]
        rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
        rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
        rename_keys.append(
            (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
        rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
        rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
        rename_keys.append(
            (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
        rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
        rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
        rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
        rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
        rename_keys.append(
            (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
        rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
        rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
        rename_keys.append(
            (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )

    rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
    rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
    rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
    rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
    rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )

    lowercase_ : Dict = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            lowercase_ : Union[str, Any] = 'efficientnet.' + item[1]
    lowercase_ : Any = 'classifier.weight'
    lowercase_ : Tuple = 'classifier.bias'
    return key_mapping


def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ) -> int:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        lowercase_ : Any = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            lowercase_ : int = torch.from_numpy(__SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            lowercase_ : str = torch.from_numpy(__SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            lowercase_ : List[str] = torch.from_numpy(np.transpose(__SCREAMING_SNAKE_CASE ) )
        else:
            lowercase_ : Union[str, Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(__SCREAMING_SNAKE_CASE )


@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ) -> int:
    lowercase_ : Any = model_classes[model_name](
        include_top=__SCREAMING_SNAKE_CASE ,
        weights='imagenet' ,
        input_tensor=__SCREAMING_SNAKE_CASE ,
        input_shape=__SCREAMING_SNAKE_CASE ,
        pooling=__SCREAMING_SNAKE_CASE ,
        classes=10_00 ,
        classifier_activation='softmax' ,
    )
    lowercase_ : Tuple = original_model.trainable_variables
    lowercase_ : str = original_model.non_trainable_variables
    lowercase_ : Tuple = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        lowercase_ : Dict = param.numpy()
    lowercase_ : str = list(tf_params.keys() )

    # Load HuggingFace model
    lowercase_ : List[str] = get_efficientnet_config(__SCREAMING_SNAKE_CASE )
    lowercase_ : Tuple = EfficientNetForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
    lowercase_ : List[str] = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    lowercase_ : List[str] = rename_keys(__SCREAMING_SNAKE_CASE )
    replace_params(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    # Initialize preprocessor and preprocess input image
    lowercase_ : List[Any] = convert_image_processor(__SCREAMING_SNAKE_CASE )
    lowercase_ : List[Any] = preprocessor(images=prepare_img() , return_tensors='pt' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        lowercase_ : Dict = hf_model(**__SCREAMING_SNAKE_CASE )
    lowercase_ : Any = outputs.logits.detach().numpy()

    # Original model inference
    lowercase_ : str = False
    lowercase_ : List[Any] = CONFIG_MAP[model_name]['image_size']
    lowercase_ : List[str] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    lowercase_ : int = image.img_to_array(__SCREAMING_SNAKE_CASE )
    lowercase_ : Tuple = np.expand_dims(__SCREAMING_SNAKE_CASE , axis=0 )
    lowercase_ : Dict = original_model.predict(__SCREAMING_SNAKE_CASE )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
            os.mkdir(__SCREAMING_SNAKE_CASE )
        # Save converted model and image processor
        hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
        preprocessor.save_pretrained(__SCREAMING_SNAKE_CASE )

    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        lowercase_ : List[str] = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(__SCREAMING_SNAKE_CASE )
        hf_model.push_to_hub(__SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    __SCREAMING_SNAKE_CASE =parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
```

code_codestyle: 367

style_context:

```python
"""simple docstring"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def _UpperCAmelCase ( self ) -> Any:
        '''simple docstring'''
        lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
        lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
        self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )

        with self.assertRaises(__UpperCamelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(__UpperCamelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def _UpperCAmelCase ( self ) -> Any:
        '''simple docstring'''
        lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__UpperCamelCase ):
            DisjunctiveConstraint(__UpperCamelCase )  # fails here

    def _UpperCAmelCase ( self ) -> str:
        '''simple docstring'''
        lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
        lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )

        lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
        lowercase_ : str = stepped is True and completed is False and reset is False
        self.assertTrue(__UpperCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
        lowercase_ : Any = stepped is True and completed is False and reset is False
        self.assertTrue(__UpperCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
        lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
        self.assertTrue(__UpperCamelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def _UpperCAmelCase ( self ) -> Any:
        '''simple docstring'''
        lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )

        lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
```

style_context_codestyle: 321
label: 0

Row 2

code:

```python
"""simple docstring"""
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

__SCREAMING_SNAKE_CASE =parse(importlib.metadata.version("torch"))


def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Version] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    lowercase_ : Union[str, Any] = STR_OPERATION_TO_FUNC[operation]
    if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
        lowercase_ : Optional[int] = parse(importlib.metadata.version(__SCREAMING_SNAKE_CASE ) )
    return operation(__SCREAMING_SNAKE_CASE , parse(__SCREAMING_SNAKE_CASE ) )


def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
    return compare_versions(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
```

code_codestyle: 368

style_context:

```python
"""simple docstring"""
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging

logging.set_verbosity_info()


def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
    def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ):
        lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        if "kernel" in name:
            lowercase_ : List[Any] = array.transpose()
        return torch.from_numpy(__SCREAMING_SNAKE_CASE )

    def get_encoder_array(__SCREAMING_SNAKE_CASE : str ):
        lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        if "kernel" in name:
            lowercase_ : Tuple = array.transpose()
        return torch.from_numpy(__SCREAMING_SNAKE_CASE )

    def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
        lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        if "kernel" in name:
            lowercase_ : List[str] = array.transpose()
        return torch.from_numpy(__SCREAMING_SNAKE_CASE )

    def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
        lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE )
        if "kernel" in name:
            lowercase_ : List[str] = array.transpose()
        return torch.from_numpy(__SCREAMING_SNAKE_CASE )

    print(F'''Loading model based on config from {config_path}...''' )
    lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
    lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE )

    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        lowercase_ : BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        lowercase_ : BertSelfAttention = layer.attention.self

        lowercase_ : str = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape )
        lowercase_ : Tuple = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape )
        lowercase_ : Tuple = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape )
        lowercase_ : int = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape )
        lowercase_ : Dict = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape )
        lowercase_ : List[Any] = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape )

        # Self-attention Output
        lowercase_ : BertSelfOutput = layer.attention.output

        lowercase_ : Dict = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape )
        lowercase_ : Any = get_encoder_attention_layer_array(
            __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape )

        lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' )
        lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' )

        # Intermediate
        lowercase_ : BertIntermediate = layer.intermediate

        lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' )
        lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' )

        # Output
        lowercase_ : BertOutput = layer.output

        lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' )
        lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' )

        lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' )
        lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' )

    # Embeddings
    lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' )
    lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' )
    lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' )
    lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' )

    # LM Head
    lowercase_ : int = model.cls.predictions.transform

    lowercase_ : str = get_masked_lm_array('dense/kernel' )
    lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' )

    lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' )
    lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' )

    lowercase_ : List[str] = get_masked_lm_array('embedding_table' )

    # Pooling
    lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE )
    lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' )
    lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' )

    # Export final model
    model.save_pretrained(__SCREAMING_SNAKE_CASE )

    # Integration test - should load without any errors ;)
    lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE )
    print(new_model.eval() )

    print('Model conversion was done sucessfully!' )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    __SCREAMING_SNAKE_CASE =parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
```

style_context_codestyle: 321
label: 0

Row 3

code:

```python
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging

__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE ={
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class UpperCamelCase ( lowercase_ ):
    lowercase = 'perceiver'

    def __init__( self ,__UpperCamelCase=256 ,__UpperCamelCase=1280 ,__UpperCamelCase=768 ,__UpperCamelCase=1 ,__UpperCamelCase=26 ,__UpperCamelCase=8 ,__UpperCamelCase=8 ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="kv" ,__UpperCamelCase=1 ,__UpperCamelCase=1 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=True ,__UpperCamelCase=262 ,__UpperCamelCase=2048 ,__UpperCamelCase=56 ,__UpperCamelCase=[368, 496] ,__UpperCamelCase=16 ,__UpperCamelCase=1920 ,__UpperCamelCase=16 ,__UpperCamelCase=[1, 16, 224, 224] ,**__UpperCamelCase ,) -> List[Any]:
        '''simple docstring'''
        super().__init__(**__UpperCamelCase )
        lowercase_ : Dict = num_latents
        lowercase_ : List[str] = d_latents
        lowercase_ : Tuple = d_model
        lowercase_ : Optional[Any] = num_blocks
        lowercase_ : Dict = num_self_attends_per_block
        lowercase_ : Tuple = num_self_attention_heads
        lowercase_ : Optional[Any] = num_cross_attention_heads
        lowercase_ : Tuple = qk_channels
        lowercase_ : Tuple = v_channels
        lowercase_ : Any = cross_attention_shape_for_attention
        lowercase_ : Any = self_attention_widening_factor
        lowercase_ : str = cross_attention_widening_factor
        lowercase_ : Optional[Any] = hidden_act
        lowercase_ : Dict = attention_probs_dropout_prob
        lowercase_ : Any = initializer_range
        lowercase_ : Dict = layer_norm_eps
        lowercase_ : int = use_query_residual
        # masked language modeling attributes
        lowercase_ : Tuple = vocab_size
        lowercase_ : Union[str, Any] = max_position_embeddings
        # image classification attributes
        lowercase_ : Union[str, Any] = image_size
        # flow attributes
        lowercase_ : Dict = train_size
        # multimodal autoencoding attributes
        lowercase_ : Optional[int] = num_frames
        lowercase_ : List[Any] = audio_samples_per_frame
        lowercase_ : str = samples_per_patch
        lowercase_ : Dict = output_shape


class UpperCamelCase ( lowercase_ ):
    @property
    def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            lowercase_ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            lowercase_ : Optional[Any] = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def _UpperCAmelCase ( self ) -> float:
        '''simple docstring'''
        return 1e-4

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = 3 ,__UpperCamelCase = 40 ,__UpperCamelCase = 40 ,) -> Mapping[str, Any]:
        '''simple docstring'''
        if isinstance(__UpperCamelCase ,__UpperCamelCase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            lowercase_ : List[Any] = compute_effective_axis_dimension(
                __UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            lowercase_ : Optional[Any] = preprocessor.num_special_tokens_to_add(__UpperCamelCase )
            lowercase_ : int = compute_effective_axis_dimension(
                __UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__UpperCamelCase )
            # Generate dummy inputs according to compute batch and sequence
            lowercase_ : Dict = [' '.join(['a'] ) * seq_length] * batch_size
            lowercase_ : int = dict(preprocessor(__UpperCamelCase ,return_tensors=__UpperCamelCase ) )
            lowercase_ : Union[str, Any] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            lowercase_ : Union[str, Any] = compute_effective_axis_dimension(__UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_batch )
            lowercase_ : Dict = self._generate_dummy_images(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
            lowercase_ : Union[str, Any] = dict(preprocessor(images=__UpperCamelCase ,return_tensors=__UpperCamelCase ) )
            lowercase_ : List[Any] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
```

code_codestyle: 369

style_context:

```python
"""simple docstring"""
from collections import namedtuple

import requests
from lxml import html  # type: ignore

__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")


def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ):
    lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) )


__SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
```

style_context_codestyle: 321
label: 0

Row 4

code:

```python
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)

__SCREAMING_SNAKE_CASE ="▁"

__SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

__SCREAMING_SNAKE_CASE ={
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

__SCREAMING_SNAKE_CASE ={
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

__SCREAMING_SNAKE_CASE ={
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

__SCREAMING_SNAKE_CASE ={
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class UpperCamelCase ( lowercase_ ):
    lowercase = ['input_ids']
    lowercase = VOCAB_FILES_NAMES
    lowercase = PRETRAINED_INIT_CONFIGURATION
    lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase = PRETRAINED_VOCAB_FILES_MAP
    lowercase = RESOURCE_FILES_NAMES

    def __init__( self ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=False ,__UpperCamelCase="utf8" ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None:
        '''simple docstring'''
        lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,vocab_file=__UpperCamelCase ,encoding=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,)
        lowercase_ : str = do_lower_case
        lowercase_ : Union[str, Any] = sentencepiece_model_ckpt
        lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__UpperCamelCase )

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            lowercase_ : Union[str, Any] = self.load_vocab(filepath=__UpperCamelCase )
        else:
            lowercase_ : Optional[Any] = {self.sp_model.id_to_piece(__UpperCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
        lowercase_ : int = {v: k for k, v in self.vocab.items()}

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
        '''simple docstring'''
        if text is None:
            return None
        lowercase_ : Union[str, Any] = self.tokenize(__UpperCamelCase )
        lowercase_ : Optional[int] = '', []
        for i, ch in enumerate(__UpperCamelCase ):
            if ch in self.SP_CHAR_MAPPING:
                lowercase_ : Any = self.SP_CHAR_MAPPING.get(__UpperCamelCase )
            else:
                lowercase_ : Any = unicodedata.normalize('NFKC' ,__UpperCamelCase )
            if self.is_whitespace(__UpperCamelCase ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(__UpperCamelCase ) )
        lowercase_ : Optional[Any] = normalized_text, [], 0
        if self.do_lower_case:
            lowercase_ : int = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                lowercase_ : Optional[int] = token[1:]
            lowercase_ : Union[str, Any] = text[offset:].index(__UpperCamelCase ) + offset
            lowercase_ : int = start + len(__UpperCamelCase )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            lowercase_ : Dict = end
        return token_mapping

    @property
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        return len(self.vocab )

    def _UpperCAmelCase ( self ) -> List[Any]:
        '''simple docstring'''
        return dict(self.vocab ,**self.added_tokens_encoder )

    def __getstate__( self ) -> str:
        '''simple docstring'''
        lowercase_ : Optional[int] = self.__dict__.copy()
        lowercase_ : List[str] = None
        return state

    def __setstate__( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        lowercase_ : int = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            lowercase_ : List[str] = {}
        lowercase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
        '''simple docstring'''
        return "".join((self.SP_CHAR_MAPPING.get(__UpperCamelCase ,__UpperCamelCase ) for c in text) )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=64 ,__UpperCamelCase=0.1 ) -> List[Any]:
        '''simple docstring'''
        if self.sp_model_kwargs.get('enable_sampling' ) is True:
            lowercase_ : Optional[int] = True
        if self.sp_model_kwargs.get('alpha' ) is not None:
            lowercase_ : List[str] = self.sp_model_kwargs.get('alpha' )
        if self.sp_model_kwargs.get('nbest_size' ) is not None:
            lowercase_ : List[Any] = self.sp_model_kwargs.get('nbest_size' )
        if not enable_sampling:
            lowercase_ : List[Any] = self.sp_model.EncodeAsPieces(__UpperCamelCase )
        else:
            lowercase_ : Union[str, Any] = self.sp_model.SampleEncodeAsPieces(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
        lowercase_ : Union[str, Any] = []
        for pi, piece in enumerate(__UpperCamelCase ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(__UpperCamelCase ) and pi != 0:
                    new_pieces.append(__UpperCamelCase )
                    continue
                else:
                    continue
            lowercase_ : Dict = 0
            for i, chunk in enumerate(__UpperCamelCase ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(__UpperCamelCase ) or self.is_punct(__UpperCamelCase ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(__UpperCamelCase )
                    lowercase_ : Optional[Any] = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lowercase_ : Any = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lowercase_ : Any = i
            if len(__UpperCamelCase ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
        '''simple docstring'''
        lowercase_ : List[str] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip()
        return out_string

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
        '''simple docstring'''
        lowercase_ : Dict = self.convert_ids_to_tokens(__UpperCamelCase )
        lowercase_ : Optional[Any] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip()
        return out_string

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        return self.vocab.get(__UpperCamelCase ,self.vocab.get(self.unk_token ) )

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
        '''simple docstring'''
        return self.reverse_vocab.get(__UpperCamelCase ,self.unk_token )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[str]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase_ : str = [self.cls_token_id]
        lowercase_ : Any = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> str:
        '''simple docstring'''
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=False ) -> Union[str, Any]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
        return [1] + ([0] * len(__UpperCamelCase )) + [1]

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(__UpperCamelCase ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(__UpperCamelCase ) + 1) + [1] * (len(__UpperCamelCase ) + 3)

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
        '''simple docstring'''
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(__UpperCamelCase ) == 1:
            lowercase_ : Tuple = unicodedata.category(__UpperCamelCase )
            if cat == "Zs":
                return True
        return False

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase_ : Any = {}
        with io.open(__UpperCamelCase ,'r' ,encoding='utf-8' ) as f:
            for index, line in enumerate(__UpperCamelCase ):
                lowercase_ : List[str] = line.rstrip('\n' )
                lowercase_ : int = int(__UpperCamelCase )
        return token_to_idx

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]:
        '''simple docstring'''
        lowercase_ : Dict = 0
        if os.path.isdir(__UpperCamelCase ):
            lowercase_ : Optional[Any] = os.path.join(
                __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            lowercase_ : Tuple = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() ,key=lambda __UpperCamelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!' )
                    lowercase_ : Optional[Any] = token_index
                writer.write(token + '\n' )
                index += 1
        lowercase_ : Any = os.path.join(__UpperCamelCase ,'sentencepiece.bpe.model' )
        with open(__UpperCamelCase ,'wb' ) as fi:
            lowercase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
            fi.write(__UpperCamelCase )
        return (vocab_file,)
```

code_codestyle: 370

style_context:

```python
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
```

style_context_codestyle: 321
label: 0

Row 5

code:

```python
"""simple docstring"""
class UpperCamelCase :
    def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
        '''simple docstring'''
        lowercase_ : List[Any] = name
        lowercase_ : int = val

    def __str__( self ) -> Tuple:
        '''simple docstring'''
        return f'''{self.__class__.__name__}({self.name}, {self.val})'''

    def __lt__( self ,__UpperCamelCase ) -> Tuple:
        '''simple docstring'''
        return self.val < other.val


class UpperCamelCase :
    def __init__( self ,__UpperCamelCase ) -> Dict:
        '''simple docstring'''
        lowercase_ : Optional[int] = {}
        lowercase_ : Tuple = {}
        lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )

    def __getitem__( self ,__UpperCamelCase ) -> int:
        '''simple docstring'''
        return self.get_value(__UpperCamelCase )

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        return (idx - 1) // 2

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
        '''simple docstring'''
        return idx * 2 + 1

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
        '''simple docstring'''
        return idx * 2 + 2

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
        '''simple docstring'''
        return self.heap_dict[key]

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
        '''simple docstring'''
        lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
        lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
        for idx, i in enumerate(__UpperCamelCase ):
            lowercase_ : Any = idx
            lowercase_ : str = i.val
        for i in range(__UpperCamelCase ,-1 ,-1 ):
            self.sift_down(__UpperCamelCase ,__UpperCamelCase )
        return array

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
        '''simple docstring'''
        while True:
            lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase )  # noqa: E741
            lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
            lowercase_ : List[str] = idx
            if l < len(__UpperCamelCase ) and array[l] < array[idx]:
                lowercase_ : List[str] = l
            if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
                lowercase_ : Dict = r
            if smallest != idx:
                lowercase_ : Union[str, Any] = array[smallest], array[idx]
                (
                    lowercase_
                ) : str = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                lowercase_ : Any = smallest
            else:
                break

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
        '''simple docstring'''
        lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            lowercase_ : Any = self.heap[idx], self.heap[p]
            lowercase_ : Tuple = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            lowercase_ : int = p
            lowercase_ : str = self.get_parent_idx(__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> str:
        '''simple docstring'''
        return self.heap[0]

    def _UpperCAmelCase ( self ) -> Optional[int]:
        '''simple docstring'''
        lowercase_ : Optional[Any] = self.heap[-1], self.heap[0]
        lowercase_ : Tuple = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        lowercase_ : Tuple = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 ,self.heap )
        return x

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
        '''simple docstring'''
        self.heap.append(__UpperCamelCase )
        lowercase_ : Tuple = len(self.heap ) - 1
        lowercase_ : Optional[int] = node.val
        self.sift_up(len(self.heap ) - 1 )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        return len(self.heap ) == 0

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
        '''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        lowercase_ : Any = new_value
        lowercase_ : List[str] = new_value
        self.sift_up(self.idx_of_element[node] )


__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

code_codestyle: 371

style_context:

```python
"""simple docstring"""
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class UpperCamelCase :
    def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]:
        '''simple docstring'''
        lowercase_ : Any = parent
        lowercase_ : str = batch_size
        lowercase_ : List[Any] = seq_length
        lowercase_ : Dict = is_training
        lowercase_ : Tuple = use_input_mask
        lowercase_ : Optional[Any] = use_token_type_ids
        lowercase_ : List[str] = use_labels
        lowercase_ : Any = vocab_size
        lowercase_ : List[str] = hidden_size
        lowercase_ : Optional[int] = num_hidden_layers
        lowercase_ : int = num_attention_heads
        lowercase_ : int = intermediate_size
        lowercase_ : List[Any] = hidden_act
        lowercase_ : Optional[int] = hidden_dropout_prob
        lowercase_ : Tuple = attention_probs_dropout_prob
        lowercase_ : Tuple = max_position_embeddings
        lowercase_ : Optional[int] = type_vocab_size
        lowercase_ : Optional[int] = type_sequence_label_size
        lowercase_ : Dict = initializer_range
        lowercase_ : int = num_labels
        lowercase_ : Any = num_choices
        lowercase_ : int = scope

    def _UpperCAmelCase ( self ) -> Any:
        '''simple docstring'''
        lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lowercase_ : Dict = None
        if self.use_input_mask:
            lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ : Tuple = None
        lowercase_ : Tuple = None
        lowercase_ : Tuple = None
        if self.use_labels:
            lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices )
        lowercase_ : str = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCAmelCase ( self ) -> List[str]:
        '''simple docstring'''
        return EsmConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
        '''simple docstring'''
        lowercase_ : List[Any] = EsmModel(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )
        lowercase_ : Union[str, Any] = model(__UpperCamelCase )
        lowercase_ : int = model(__UpperCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase_ : str = self.num_labels
        lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCAmelCase ( self ) -> str:
        '''simple docstring'''
        lowercase_ : Any = self.prepare_config_and_inputs()
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) : Optional[int] = config_and_inputs
        lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    lowercase = False
    lowercase = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowercase = ()
    lowercase = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase = True

    def _UpperCAmelCase ( self ) -> List[str]:
        '''simple docstring'''
        lowercase_ : Dict = EsmModelTester(self )
        lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 )

    def _UpperCAmelCase ( self ) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> List[str]:
        '''simple docstring'''
        lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase_ : Optional[Any] = type
            self.model_tester.create_and_check_model(*__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> int:
        '''simple docstring'''
        lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> Dict:
        '''simple docstring'''
        lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )

    @slow
    def _UpperCAmelCase ( self ) -> str:
        '''simple docstring'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> List[Any]:
        '''simple docstring'''
        lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase )
        lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        lowercase_ : List[Any] = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx )
        self.assertEqual(position_ids.shape ,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )

    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0]
        lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase )
        lowercase_ : List[Any] = torch.empty(2 ,4 ,30 )
        lowercase_ : List[str] = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
        lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase )
        self.assertEqual(position_ids.shape ,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) )

    @unittest.skip('Esm does not support embedding resizing' )
    def _UpperCAmelCase ( self ) -> str:
        '''simple docstring'''
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        pass


@require_torch
class UpperCamelCase ( lowercase_ ):
    @slow
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        with torch.no_grad():
            lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            lowercase_ : List[str] = model(__UpperCamelCase )[0]
            lowercase_ : Optional[int] = 33
            lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape ,__UpperCamelCase )
            lowercase_ : List[str] = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )

    @slow
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        with torch.no_grad():
            lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            lowercase_ : Dict = model(__UpperCamelCase )[0]
            # compare the actual values for a slice.
            lowercase_ : Any = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
```

style_context_codestyle: 321
label: 0

Row 6

code:

```python
"""simple docstring"""
from __future__ import annotations

from math import pow, sqrt


def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(__SCREAMING_SNAKE_CASE , 2 ) - pow(__SCREAMING_SNAKE_CASE , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(__SCREAMING_SNAKE_CASE , 2 ) - pow(__SCREAMING_SNAKE_CASE , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(__SCREAMING_SNAKE_CASE , 2 ) + pow(__SCREAMING_SNAKE_CASE , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

code_codestyle: 350

style_context (sample truncated in the source):

```python
"""simple docstring"""
import pickle

import numpy as np
from matplotlib import pyplot as plt


class UpperCamelCase :
    def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase_ : Optional[int] = bp_numa
        lowercase_ : Dict = bp_numa
        lowercase_ : Tuple = bp_numa
        lowercase_ : List[Any] = conva_get[:2]
        lowercase_ : int = conva_get[2]
        lowercase_ : Dict = size_pa
        lowercase_ : int = rate_w
        lowercase_ : Union[str, Any] = rate_t
        lowercase_ : Dict = [
            np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
        lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
        lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
        '''simple docstring'''
        lowercase_ : int = {
            'num_bp1': self.num_bpa,
            'num_bp2': self.num_bpa,
            'num_bp3': self.num_bpa,
            'conv1': self.conva,
            'step_conv1': self.step_conva,
            'size_pooling1': self.size_poolinga,
            'rate_weight': self.rate_weight,
            'rate_thre': self.rate_thre,
            'w_conv1': self.w_conva,
            'wkj': self.wkj,
            'vji': self.vji,
            'thre_conv1': self.thre_conva,
            'thre_bp2': self.thre_bpa,
            'thre_bp3': self.thre_bpa,
        }
        with open(__UpperCamelCase ,'wb' ) as f:
            pickle.dump(__UpperCamelCase ,__UpperCamelCase )
        print(f'''Model saved: {save_path}''' )

    @classmethod
    def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]:
        '''simple docstring'''
        with open(__UpperCamelCase ,'rb' ) as f:
            lowercase_ : Any = pickle.load(__UpperCamelCase )  # noqa: S301
        lowercase_ : str = model_dic.get('conv1' )
        conv_get.append(model_dic.get('step_conv1' ) )
        lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' )
        lowercase_ : Optional[Any] = model_dic.get('num_bp1' )
        lowercase_ : str = model_dic.get('num_bp2' )
        lowercase_ : Optional[Any] = model_dic.get('num_bp3' )
        lowercase_ : Union[str, Any] = model_dic.get('rate_weight' )
        lowercase_ : Optional[int] = model_dic.get('rate_thre' )
        # create model instance
        lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
        # modify model parameter
        lowercase_ : Optional[Any] = model_dic.get('w_conv1' )
        lowercase_ : Tuple = model_dic.get('wkj' )
        lowercase_ : Union[str, Any] = model_dic.get('vji' )
        lowercase_ : Optional[Any] = model_dic.get('thre_conv1' )
        lowercase_ : Dict = model_dic.get('thre_bp2' )
        lowercase_ : Optional[int] = model_dic.get('thre_bp3' )
        return conv_ins

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
        '''simple docstring'''
        return 1 / (1 + np.exp(-1 * x ))

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
        '''simple docstring'''
        return round(__UpperCamelCase ,3 )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
        '''simple docstring'''
        lowercase_ : Dict = convs[0]
        lowercase_ : Any = convs[1]
        lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
        # get the data slice of original image data, data_focus
        lowercase_ : Tuple = []
        for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
            for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
                lowercase_ : List[Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__UpperCamelCase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase_ : Dict = []
        lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__UpperCamelCase ):
            lowercase_ : Tuple = []
            for i_focus in range(len(__UpperCamelCase ) ):
                lowercase_ : Optional[int] = (
                    np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__UpperCamelCase ) )
            lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
                __UpperCamelCase ,__UpperCamelCase )
            data_featuremap.append(__UpperCamelCase )
        # expanding the data slice to One dimenssion
        lowercase_ : Optional[int] = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
        lowercase_ : str = np.asarray(__UpperCamelCase )
        return focus_list, data_featuremap

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
        '''simple docstring'''
        lowercase_ : Union[str, Any] = len(featuremaps[0] )
        lowercase_ : str = int(size_map / size_pooling )
        lowercase_ : Optional[int] = []
        for i_map in range(len(__UpperCamelCase ) ):
            lowercase_ : int = featuremaps[i_map]
            lowercase_ : List[str] = []
            for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
                for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
                    lowercase_ : List[str] = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__UpperCamelCase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__UpperCamelCase ) )
            lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
            featuremap_pooled.append(__UpperCamelCase )
        return featuremap_pooled

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
        '''simple docstring'''
        lowercase_ : Tuple = []
        for i in range(len(__UpperCamelCase ) ):
            lowercase_ : Optional[Any] = np.shape(data[i] )
            lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
            lowercase_ : List[str] = data_listed.getA().tolist()[0]
            data_expanded.extend(__UpperCamelCase )
        lowercase_ : int = np.asarray(__UpperCamelCase )
        return data_expanded

    def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
        '''simple docstring'''
        lowercase_ : Any = np.asarray(__UpperCamelCase )
        lowercase_ : Any = np.shape(__UpperCamelCase )
        lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
        return data_expanded

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
        '''simple docstring'''
        lowercase_ : Any = []
        lowercase_ : List[Any] = 0
        for i_map in range(__UpperCamelCase ):
            lowercase_ : List[str] = np.ones((size_map, size_map) )
            for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
                for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
                    lowercase_ : List[Any] = pd_pool[
                        i_pool
                    ]
                    lowercase_ : Any = i_pool + 1
                    lowercase_ : Optional[int] = np.multiply(
                        __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
            pd_all.append(__UpperCamelCase )
        return pd_all

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
        '''simple docstring'''
        print('----------------------Start Training-------------------------' )
        print((' -
```
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=3 ,__UpperCamelCase=32 ,__UpperCamelCase=3 ,__UpperCamelCase=10 ,__UpperCamelCase=[10, 20, 30, 40] ,__UpperCamelCase=[1, 1, 2, 1] ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase="relu" ,__UpperCamelCase=3 ,__UpperCamelCase=None ,) -> Optional[Any]: '''simple docstring''' lowercase_ : List[str] = parent lowercase_ : List[Any] = batch_size lowercase_ : Optional[Any] = image_size lowercase_ : str = num_channels lowercase_ : List[Any] = embeddings_size lowercase_ : Any = hidden_sizes lowercase_ : Tuple = depths lowercase_ : int = is_training lowercase_ : List[Any] = use_labels lowercase_ : str = hidden_act lowercase_ : str = num_labels lowercase_ : Dict = scope lowercase_ : List[str] = len(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Any = None if self.use_labels: lowercase_ : str = ids_tensor([self.batch_size] ,self.num_labels ) lowercase_ : List[Any] = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[Any] = RegNetModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : int = self.num_labels lowercase_ : Dict = RegNetForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model(__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[Any] = self.prepare_config_and_inputs() lowercase_ : List[Any] = config_and_inputs lowercase_ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () lowercase = ( {'feature-extraction': 
RegNetModel, 'image-classification': RegNetForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = RegNetModelTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(__UpperCamelCase ) lowercase_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Optional[int] = [*signature.parameters.keys()] lowercase_ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(config=__UpperCamelCase ) for name, module in model.named_modules(): if isinstance(__UpperCamelCase ,(nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) self.assertTrue( torch.all(module.bias == 0 ) ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : Any = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : int = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) ,expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,) lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Optional[Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for 
layer_type in layers_type: lowercase_ : Optional[Any] = layer_type lowercase_ : Optional[Any] = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Any = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Optional[Any] = RegNetModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase__( ): lowercase_ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCamelCase ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : str = prepare_img() lowercase_ : Dict = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : Tuple = model(**__UpperCamelCase ) # verify the logits lowercase_ : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : int = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) )
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
"""simple docstring""" from __future__ import annotations import time import numpy as np __SCREAMING_SNAKE_CASE =[8, 5, 9, 7] __SCREAMING_SNAKE_CASE =[ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __SCREAMING_SNAKE_CASE =[ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : Tuple = claim_vector lowercase_ : Optional[Any] = allocated_resources_table lowercase_ : str = maximum_claim_table def _UpperCAmelCase ( self ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _UpperCAmelCase ( self ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _UpperCAmelCase ( self ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _UpperCAmelCase ( self ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(__UpperCamelCase ): i for i in self.__need()} def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Optional[Any] = self.__need() lowercase_ : List[Any] = self.__allocated_resources_table lowercase_ : List[Any] = self.__available_resources() lowercase_ : int = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: lowercase_ : Tuple = False for each_need in need_list: lowercase_ : Any = True for index, need in enumerate(__UpperCamelCase ): if need > available_resources[index]: lowercase_ : Tuple = False break if execution: lowercase_ : Optional[Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowercase_ : int = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(__UpperCamelCase ) # update available/freed resources stack lowercase_ : List[str] = np.array(__UpperCamelCase ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__UpperCamelCase ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(__UpperCamelCase ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(__UpperCamelCase ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__UpperCamelCase ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__UpperCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
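# Usage sketch (assuming the class is imported from transformers as
# `EncodecFeatureExtractor`): two mono clips of different lengths come back
# zero-padded to the longest, with a matching padding mask.
#
#     extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000, padding_value=0.0)
#     clips = [np.random.rand(16000).astype(np.float32), np.random.rand(24000).astype(np.float32)]
#     batch = extractor(clips, sampling_rate=24000, return_tensors="np")
#     batch["input_values"].shape  # (2, 1, 24000)
#     batch["padding_mask"].shape  # (2, 24000)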
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=True ,__UpperCamelCase=1 / 255 ,__UpperCamelCase=True ,) -> List[str]: '''simple docstring''' lowercase_ : str = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} lowercase_ : Dict = parent lowercase_ : str = batch_size lowercase_ : Dict = num_channels lowercase_ : str = min_resolution lowercase_ : Optional[Any] = max_resolution lowercase_ : Any = do_resize lowercase_ : Optional[int] = size lowercase_ : Union[str, Any] = do_normalize lowercase_ : Any = image_mean lowercase_ : List[Any] = image_std lowercase_ : Tuple = do_rescale lowercase_ : Optional[int] = rescale_factor lowercase_ : List[Any] = do_pad def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ) -> List[Any]: '''simple docstring''' if not batched: lowercase_ : Optional[Any] = image_inputs[0] if isinstance(__UpperCamelCase ,Image.Image ): lowercase_ : Optional[Any] = image.size else: lowercase_ : Optional[int] = image.shape[1], image.shape[2] if w < h: lowercase_ : Tuple = int(self.size['shortest_edge'] * h / w ) lowercase_ : Tuple = self.size['shortest_edge'] elif w > h: lowercase_ : int = self.size['shortest_edge'] lowercase_ : Any = int(self.size['shortest_edge'] * w / h ) else: lowercase_ : str = self.size['shortest_edge'] lowercase_ : Union[str, Any] = self.size['shortest_edge'] else: lowercase_ : int = [] for image in image_inputs: lowercase_ : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase_ : Union[str, Any] = max(__UpperCamelCase ,key=lambda __UpperCamelCase : item[0] )[0] lowercase_ : Any = max(__UpperCamelCase ,key=lambda __UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): """simple docstring""" lowercase = ConditionalDetrImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[str] = ConditionalDetrImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) ) 
self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad ,__UpperCamelCase ) lowercase_ : Any = self.image_processing_class.from_dict( self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=__UpperCamelCase ) self.assertEqual(image_processor.size ,{'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,Image.Image ) # Test not batched input lowercase_ : str = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched lowercase_ : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase ,batched=__UpperCamelCase ) lowercase_ : Dict = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,np.ndarray ) # Test not batched input lowercase_ : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values lowercase_ : Dict = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched lowercase_ : Any = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values lowercase_ : str = self.image_processor_tester.get_expected_values(__UpperCamelCase ,batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,torch.Tensor ) # Test not batched input lowercase_ : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values lowercase_ : 
Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched lowercase_ : List[str] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(__UpperCamelCase ,batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f: lowercase_ : Optional[int] = json.loads(f.read() ) lowercase_ : str = {'image_id': 3_9769, 'annotations': target} # encode them lowercase_ : Any = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' ) lowercase_ : Dict = image_processing(images=__UpperCamelCase ,annotations=__UpperCamelCase ,return_tensors='pt' ) # verify pixel values lowercase_ : Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__UpperCamelCase ,atol=1e-4 ) ) # verify area lowercase_ : int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__UpperCamelCase ) ) # verify boxes lowercase_ : str = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape ,__UpperCamelCase ) lowercase_ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__UpperCamelCase ,atol=1e-3 ) ) # verify image_id lowercase_ : Any = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__UpperCamelCase ) ) # verify is_crowd lowercase_ : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__UpperCamelCase ) ) # verify class_labels lowercase_ : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__UpperCamelCase ) ) # verify orig_size lowercase_ : Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__UpperCamelCase ) ) # verify size lowercase_ : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__UpperCamelCase ) ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f: lowercase_ : Dict = json.loads(f.read() ) lowercase_ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target} lowercase_ : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them lowercase_ : Optional[int] = ConditionalDetrImageProcessor(format='coco_panoptic' ) lowercase_ : Optional[Any] = image_processing(images=__UpperCamelCase ,annotations=__UpperCamelCase ,masks_path=__UpperCamelCase ,return_tensors='pt' ) # verify pixel values 
lowercase_ : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__UpperCamelCase ,atol=1e-4 ) ) # verify area lowercase_ : List[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__UpperCamelCase ) ) # verify boxes lowercase_ : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape ,__UpperCamelCase ) lowercase_ : Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__UpperCamelCase ,atol=1e-3 ) ) # verify image_id lowercase_ : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__UpperCamelCase ) ) # verify is_crowd lowercase_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__UpperCamelCase ) ) # verify class_labels lowercase_ : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__UpperCamelCase ) ) # verify masks lowercase_ : List[str] = 82_2873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__UpperCamelCase ) # verify orig_size lowercase_ : Optional[int] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__UpperCamelCase ) ) # verify size lowercase_ : List[str] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__UpperCamelCase ) )
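# For reference, the detection path above consumes COCO-style targets: one
# dict per image, e.g. (values illustrative)
#
#     {"image_id": 39769,
#      "annotations": [{"bbox": [10.0, 20.0, 30.0, 40.0],
#                       "category_id": 75, "area": 1200.0, "iscrowd": 0}]}
#
# while the panoptic path takes `file_name` + `segments_info` plus a
# `masks_path` directory, as in the second test.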
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
0
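The test row above drives transformers' TensorFlow benchmark utilities end to end. A minimal standalone sketch of the same API, assuming a transformers version that still ships the (since-deprecated) benchmark module; the tiny checkpoint and the shape arguments are the ones the tests themselves use:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

# Same tiny model and shapes exercised by the tests in this row.
args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)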
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = feature_size lowercase_ : Optional[Any] = sampling_rate lowercase_ : int = padding_value lowercase_ : Dict = kwargs.pop('padding_side' ,'right' ) lowercase_ : str = kwargs.pop('return_attention_mask' ,__UpperCamelCase ) super().__init__(**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if isinstance(__UpperCamelCase ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ): lowercase_ : List[Any] = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( 'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`' f''' to this method that includes {self.model_input_names[0]}, but you provided''' f''' {list(processed_features.keys() )}''' ) lowercase_ : int = processed_features[self.model_input_names[0]] lowercase_ : Union[str, Any] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__UpperCamelCase ) == 0: if return_attention_mask: lowercase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch lowercase_ : Union[str, Any] = required_input[0] if isinstance(__UpperCamelCase ,(list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. lowercase_ : List[Any] = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__UpperCamelCase ): lowercase_ : List[str] = required_input[index][0] if return_tensors is None: if is_tf_tensor(__UpperCamelCase ): lowercase_ : str = 'tf' elif is_torch_tensor(__UpperCamelCase ): lowercase_ : Tuple = 'pt' elif isinstance(__UpperCamelCase ,(int, float, list, tuple, np.ndarray) ): lowercase_ : str = 'np' else: raise ValueError( f'''type of {first_element} unknown: {type(__UpperCamelCase )}. ''' 'Should be one of a python, numpy, pytorch or tensorflow object.' 
) for key, value in processed_features.items(): if isinstance(value[0] ,(int, float) ): lowercase_ : List[str] = to_numpy(__UpperCamelCase ) else: lowercase_ : Optional[Any] = [to_numpy(__UpperCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy lowercase_ : int = self._get_padding_strategies(padding=__UpperCamelCase ,max_length=__UpperCamelCase ) lowercase_ : List[str] = processed_features[self.model_input_names[0]] lowercase_ : Union[str, Any] = len(__UpperCamelCase ) if not all(len(__UpperCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError('Some items in the output dictionary have a different batch size than others.' ) lowercase_ : Dict = [] for i in range(__UpperCamelCase ): lowercase_ : Any = {k: v[i] for k, v in processed_features.items()} # truncation lowercase_ : List[Any] = self._truncate( __UpperCamelCase ,max_length=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,truncation=__UpperCamelCase ,) truncated_inputs.append(__UpperCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length lowercase_ : Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) lowercase_ : Dict = PaddingStrategy.MAX_LENGTH lowercase_ : List[str] = {} for i in range(__UpperCamelCase ): # padding lowercase_ : str = self._pad( truncated_inputs[i] ,max_length=__UpperCamelCase ,padding_strategy=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) for key, value in outputs.items(): if key not in batch_outputs: lowercase_ : Optional[int] = [] if value.dtype is np.dtype(np.floataa ): lowercase_ : Optional[int] = value.astype(np.floataa ) batch_outputs[key].append(__UpperCamelCase ) return BatchFeature(__UpperCamelCase ,tensor_type=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = PaddingStrategy.DO_NOT_PAD ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> dict: '''simple docstring''' lowercase_ : Optional[Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: lowercase_ : Optional[Any] = len(__UpperCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowercase_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowercase_ : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__UpperCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: lowercase_ : str = np.ones(len(__UpperCamelCase ) ,dtype=np.intaa ) if needs_to_be_padded: lowercase_ : Optional[int] = max_length - len(__UpperCamelCase ) if self.padding_side == "right": if return_attention_mask: lowercase_ : Union[str, Any] = np.pad( processed_features['attention_mask'] ,(0, difference) ) lowercase_ : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) lowercase_ : Tuple = np.pad( __UpperCamelCase ,__UpperCamelCase ,'constant' ,constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: lowercase_ : Optional[Any] = np.pad( processed_features['attention_mask'] ,(difference, 0) ) lowercase_ : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) lowercase_ : Any = np.pad( __UpperCamelCase ,__UpperCamelCase ,'constant' ,constant_values=self.padding_value ) else: raise ValueError('Invalid 
padding strategy:' + str(self.padding_side ) ) return processed_features def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> int: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' ) lowercase_ : Union[str, Any] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowercase_ : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowercase_ : Any = len(__UpperCamelCase ) > max_length if needs_to_be_truncated: lowercase_ : Any = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: lowercase_ : Union[str, Any] = processed_features['attention_mask'][:max_length] return processed_features def _UpperCAmelCase ( self ,__UpperCamelCase=False ,__UpperCamelCase=None ) -> Dict: '''simple docstring''' if padding is not False: if padding is True: lowercase_ : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Optional[Any] = PaddingStrategy(__UpperCamelCase ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Union[str, Any] = padding else: lowercase_ : Union[str, Any] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( 'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use' ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' ) return padding_strategy
355
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
0
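The _pad helper in the feature extractor above reduces to a couple of np.pad calls. A self-contained sketch of the right-padding case for a 1-D feature (the feature_size == 1 branch), using hypothetical sample values:

import numpy as np

padding_value = 0.0
required_input = np.array([0.1, 0.2, 0.3])
max_length = 6

difference = max_length - len(required_input)
# Pad the attention mask with zeros and the input with the padding value.
attention_mask = np.pad(np.ones(len(required_input), dtype=np.int32), (0, difference))
padded = np.pad(required_input, (0, difference), "constant", constant_values=padding_value)

print(padded)          # [0.1 0.2 0.3 0.  0.  0. ]
print(attention_mask)  # [1 1 1 0 0 0]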
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class UpperCamelCase ( lowercase_ ): lowercase = 'swinv2' lowercase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,__UpperCamelCase=224 ,__UpperCamelCase=4 ,__UpperCamelCase=3 ,__UpperCamelCase=96 ,__UpperCamelCase=[2, 2, 6, 2] ,__UpperCamelCase=[3, 6, 12, 24] ,__UpperCamelCase=7 ,__UpperCamelCase=4.0 ,__UpperCamelCase=True ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.1 ,__UpperCamelCase="gelu" ,__UpperCamelCase=False ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-5 ,__UpperCamelCase=32 ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : str = image_size lowercase_ : List[Any] = patch_size lowercase_ : Any = num_channels lowercase_ : int = embed_dim lowercase_ : Optional[int] = depths lowercase_ : List[str] = len(__UpperCamelCase ) lowercase_ : int = num_heads lowercase_ : Union[str, Any] = window_size lowercase_ : str = mlp_ratio lowercase_ : int = qkv_bias lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Dict = drop_path_rate lowercase_ : Dict = hidden_act lowercase_ : List[Any] = use_absolute_embeddings lowercase_ : str = layer_norm_eps lowercase_ : Dict = initializer_range lowercase_ : List[Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase_ : Optional[Any] = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) ) lowercase_ : List[Any] = (0, 0, 0, 0)
356
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
0
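The configuration class at the top of this row maps onto the public Swin V2 API. A minimal sketch, assuming a transformers build with Swin V2 support; it shows the derived hidden_size set in the constructor's final lines:

from transformers import Swinv2Config, Swinv2Model

config = Swinv2Config(
    image_size=256,
    embed_dim=96,
    depths=[2, 2, 6, 2],
    num_heads=[3, 6, 12, 24],
)
model = Swinv2Model(config)
# hidden_size is derived as embed_dim * 2 ** (len(depths) - 1)
print(config.hidden_size)  # 768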
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : Union[str, Any] = abs(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = 0 while n > 0: res += n % 10 n //= 10 return res def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : Optional[Any] = abs(__SCREAMING_SNAKE_CASE ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def lowercase__( __SCREAMING_SNAKE_CASE : int ): return sum(int(__SCREAMING_SNAKE_CASE ) for c in str(abs(__SCREAMING_SNAKE_CASE ) ) ) def lowercase__( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None: lowercase_ : int = F'''{func.__name__}({value})''' lowercase_ : Any = timeit(F'''__main__.{call}''' , setup='import __main__' ) print(F'''{call:56} = {func(__SCREAMING_SNAKE_CASE )} -- {timing:.4f} seconds''' ) for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
357
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
0
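The memoised variant in the row above is just the naive recursion plus a cache keyed on target. An equivalent sketch using functools.cache (illustrative only, not part of the row), checked against the row's example array=[1, 2, 5], target=5:

from functools import cache

array = (1, 2, 5)

@cache
def ways(target: int) -> int:
    # number of ordered sequences from `array` summing to `target`
    if target < 0:
        return 0
    if target == 0:
        return 1
    return sum(ways(target - item) for item in array)

print(ways(5))  # 9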
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowercase__( ): lowercase_ : List[Any] = ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=__SCREAMING_SNAKE_CASE , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=__SCREAMING_SNAKE_CASE ) return parser.parse_args() def lowercase__( ): lowercase_ : str = parse_args() # Import training_script as a module. lowercase_ : Union[str, Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowercase_ : List[str] = script_fpath.stem lowercase_ : Dict = importlib.import_module(__SCREAMING_SNAKE_CASE ) # Patch sys.argv lowercase_ : Tuple = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
0
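A short exercise of the union-by-rank structure in the row above (a sketch that assumes the DisjointSet class from that row is in scope; set_counts holds the initial size of each singleton set):

ds = DisjointSet([1, 1, 1, 1])
ds.merge(1, 2)
ds.merge(0, 2)
print(ds.get_parent(0) == ds.get_parent(1))  # True: 0, 1 and 2 are joined
print(ds.max_set)                            # 3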
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __SCREAMING_SNAKE_CASE =models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="relu")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __SCREAMING_SNAKE_CASE =train_datagen.flow_from_directory( "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) __SCREAMING_SNAKE_CASE =test_datagen.flow_from_directory( "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(64, 64) ) __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.img_to_array(test_image) __SCREAMING_SNAKE_CASE =np.expand_dims(test_image, axis=0) __SCREAMING_SNAKE_CASE =classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __SCREAMING_SNAKE_CASE ="Normal" if result[0][0] == 1: __SCREAMING_SNAKE_CASE ="Abnormality detected"
359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
0
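The training script in the row above assumes 64x64 RGB inputs and a sigmoid head. A self-contained sketch of the same Conv -> Pool -> Dense topology fit on random data, just to show the shapes involved (no real dataset directories needed):

import numpy as np
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dense(units=1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# Random stand-in for the image directories used by the script above.
x = np.random.rand(8, 64, 64, 3).astype("float32")
y = np.random.randint(0, 2, size=(8, 1))
model.fit(x, y, epochs=1, verbose=0)
print(model.predict(x[:1]).shape)  # (1, 1)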
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer __SCREAMING_SNAKE_CASE =["gpt2"] __SCREAMING_SNAKE_CASE ="gpt2" if is_tf_available(): class UpperCamelCase ( tf.Module ): def __init__( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' super().__init__() lowercase_ : List[str] = tokenizer lowercase_ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TFGPTaLMHeadModel.from_config(__UpperCamelCase ) @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='text' ),) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.tokenizer(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenized['input_ids'].to_tensor() lowercase_ : List[str] = tf.cast(input_ids_dense > 0 ,tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowercase_ : List[str] = self.model(input_ids=__UpperCamelCase ,attention_mask=__UpperCamelCase )['logits'] return outputs @require_tf @require_keras_nlp class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() lowercase_ : Tuple = [GPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowercase_ : Optional[Any] = [TFGPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowercase_ : List[str] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] lowercase_ : str = list(zip(self.test_sentences ,self.test_sentences[::-1] ) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ): for test_inputs in self.test_sentences: lowercase_ : str = tokenizer([test_inputs] ,return_tensors='tf' ) lowercase_ : Optional[int] = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowercase_ : List[str] = python_outputs[key].numpy() lowercase_ : str = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__UpperCamelCase ,tf.intaa ) == tf_outputs_values ) ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase_ : List[str] = tf.function(__UpperCamelCase ) for test_inputs in self.test_sentences: lowercase_ : Dict = tf.constant(__UpperCamelCase ) lowercase_ : Union[str, Any] = compiled_tokenizer(__UpperCamelCase ) lowercase_ : Dict = tf_tokenizer(__UpperCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple 
docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase_ : Union[str, Any] = ModelToSave(tokenizer=__UpperCamelCase ) lowercase_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase_ : Tuple = model.serving(__UpperCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase_ : List[Any] = Path(__UpperCamelCase ) / 'saved.model' tf.saved_model.save(__UpperCamelCase ,__UpperCamelCase ,signatures={'serving_default': model.serving} ) lowercase_ : Optional[Any] = tf.saved_model.load(__UpperCamelCase ) lowercase_ : Any = loaded_model.signatures['serving_default'](__UpperCamelCase )['output_0'] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase_ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase_ : Any = tf_tokenizer(__UpperCamelCase ) # Build model with some sample inputs lowercase_ : Union[str, Any] = tf_tokenizer.get_config() lowercase_ : Dict = TFGPTaTokenizer.from_config(__UpperCamelCase ) lowercase_ : Union[str, Any] = model_from_config(__UpperCamelCase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run lowercase_ : Union[str, Any] = 12_3123 for max_length in [3, 5, 1024]: lowercase_ : str = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase_ : List[Any] = tf_tokenizer(__UpperCamelCase ,max_length=__UpperCamelCase ) lowercase_ : Any = out['input_ids'].numpy().shape[1] assert out_length == max_length
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
0
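The tests in the row above exercise transformers' in-graph GPT-2 tokenizer. A minimal sketch of the pattern they rely on, assuming keras-nlp is installed (the tokenizer returns ragged input_ids, as the .to_tensor() call in the row implies):

import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")

@tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
def tokenize(text):
    # Runs entirely inside the TF graph, so it can be saved with a model.
    return tokenizer(text)["input_ids"]

print(tokenize(tf.constant(["This is a straightforward English test sentence."])))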
"""simple docstring""" import argparse from collections import defaultdict import yaml __SCREAMING_SNAKE_CASE ="docs/source/en/_toctree.yml" def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : Optional[int] = defaultdict(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = [] lowercase_ : Tuple = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'local': doc['local'], 'title': doc['title']} ) else: new_doc_list.append(__SCREAMING_SNAKE_CASE ) lowercase_ : str = new_doc_list lowercase_ : List[Any] = [key for key, value in counts.items() if value > 1] lowercase_ : Tuple = [] for duplicate_key in duplicates: lowercase_ : str = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} ) if len(__SCREAMING_SNAKE_CASE ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] ) lowercase_ : Optional[Any] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__SCREAMING_SNAKE_CASE ) > 1: raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' ) overview_doc.extend(__SCREAMING_SNAKE_CASE ) # Sort return overview_doc def lowercase__( __SCREAMING_SNAKE_CASE : Dict=False ): with open(__SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: lowercase_ : Optional[int] = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ : int = content[api_idx]['sections'] # Then to the model doc lowercase_ : str = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 lowercase_ : Union[str, Any] = api_doc[scheduler_idx]['sections'] lowercase_ : str = clean_doc_toc(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = False if new_scheduler_doc != scheduler_doc: lowercase_ : List[Any] = True if overwrite: lowercase_ : Optional[Any] = new_scheduler_doc if diff: if overwrite: lowercase_ : Tuple = api_doc with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(__SCREAMING_SNAKE_CASE , allow_unicode=__SCREAMING_SNAKE_CASE ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' 
) def lowercase__( __SCREAMING_SNAKE_CASE : str=False ): with open(__SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: lowercase_ : str = yaml.safe_load(f.read() ) # Get to the API doc lowercase_ : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase_ : Optional[int] = content[api_idx]['sections'] # Then to the model doc lowercase_ : str = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 lowercase_ : List[Any] = False lowercase_ : List[str] = api_doc[pipeline_idx]['sections'] lowercase_ : Optional[Any] = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: lowercase_ : int = pipeline_doc['section'] lowercase_ : str = clean_doc_toc(__SCREAMING_SNAKE_CASE ) if overwrite: lowercase_ : Any = new_sub_pipeline_doc new_pipeline_docs.append(__SCREAMING_SNAKE_CASE ) # sort overall pipeline doc lowercase_ : Optional[Any] = clean_doc_toc(__SCREAMING_SNAKE_CASE ) if new_pipeline_docs != pipeline_docs: lowercase_ : int = True if overwrite: lowercase_ : List[Any] = new_pipeline_docs if diff: if overwrite: lowercase_ : Union[str, Any] = api_doc with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(__SCREAMING_SNAKE_CASE , allow_unicode=__SCREAMING_SNAKE_CASE ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __SCREAMING_SNAKE_CASE =parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
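The clean_doc_toc logic in the row above boils down to: deduplicate entries by "local", sort by lowercased title, and force the single "Overview" entry to the front. A self-contained sketch of that ordering rule on hypothetical entries:

docs = [
    {"local": "zeta", "title": "Zeta"},
    {"local": "overview", "title": "Overview"},
    {"local": "alpha", "title": "Alpha"},
]
overview = [d for d in docs if d["title"].lower() == "overview"]
rest = sorted(
    (d for d in docs if d["title"].lower() != "overview"),
    key=lambda d: d["title"].lower(),
)
print(overview + rest)  # Overview first, then Alpha, Zeta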
from __future__ import annotations from collections.abc import Sequence from typing import Literal def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[str] = list(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = list(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = 0 for i in range(len(__SCREAMING_SNAKE_CASE ) ): if lista[i] != lista[i]: count += 1 lowercase_ : List[Any] = '_' if count > 1: return False else: return "".join(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : list[str] ): lowercase_ : List[Any] = [] while True: lowercase_ : Optional[Any] = ['$'] * len(__SCREAMING_SNAKE_CASE ) lowercase_ : int = [] for i in range(len(__SCREAMING_SNAKE_CASE ) ): for j in range(i + 1 , len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : Tuple = compare_string(binary[i] , binary[j] ) if k is False: lowercase_ : List[str] = '*' lowercase_ : Optional[Any] = '*' temp.append('X' ) for i in range(len(__SCREAMING_SNAKE_CASE ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__SCREAMING_SNAKE_CASE ) == 0: return pi lowercase_ : Optional[Any] = list(set(__SCREAMING_SNAKE_CASE ) ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Sequence[float] ): lowercase_ : Any = [] for minterm in minterms: lowercase_ : Union[str, Any] = '' for _ in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = str(minterm % 2 ) + string minterm //= 2 temp.append(__SCREAMING_SNAKE_CASE ) return temp def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = list(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = list(__SCREAMING_SNAKE_CASE ) lowercase_ : str = 0 for i in range(len(__SCREAMING_SNAKE_CASE ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[str] ): lowercase_ : Tuple = [] lowercase_ : Optional[Any] = [0] * len(__SCREAMING_SNAKE_CASE ) for i in range(len(chart[0] ) ): lowercase_ : int = 0 lowercase_ : Dict = -1 for j in range(len(__SCREAMING_SNAKE_CASE ) ): if chart[j][i] == 1: count += 1 lowercase_ : Union[str, Any] = j if count == 1: lowercase_ : Dict = 1 for i in range(len(__SCREAMING_SNAKE_CASE ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : List[str] = 0 temp.append(prime_implicants[i] ) while True: lowercase_ : Optional[Any] = 0 lowercase_ : str = -1 lowercase_ : Any = 0 for i in range(len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : Dict = chart[i].count(1 ) if count_n > max_n: lowercase_ : List[str] = count_n lowercase_ : str = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : List[Any] = 0 def lowercase__( __SCREAMING_SNAKE_CASE : list[str] , __SCREAMING_SNAKE_CASE : list[str] ): lowercase_ : int = [[0 for x in range(len(__SCREAMING_SNAKE_CASE ) )] for x in range(len(__SCREAMING_SNAKE_CASE ) )] for i in range(len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : str = prime_implicants[i].count('_' ) for j in range(len(__SCREAMING_SNAKE_CASE ) ): if is_for_table(prime_implicants[i] , binary[j] , __SCREAMING_SNAKE_CASE ): lowercase_ : Union[str, Any] = 1 return chart def lowercase__( ): lowercase_ : List[Any] = int(input('Enter the no. 
of variables\n' ) ) lowercase_ : Optional[Any] = [ float(__SCREAMING_SNAKE_CASE ) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split() ] lowercase_ : Optional[int] = decimal_to_binary(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = check(__SCREAMING_SNAKE_CASE ) print('Prime Implicants are:' ) print(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = prime_implicant_chart(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = selection(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print('Essential Prime Implicants are:' ) print(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() main()
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less than current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
0
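For readability, here is a minimal un-obfuscated sketch of the index-tracked min-heap pattern the MinHeap record above exercises: the heap keeps an element-to-index map so decrease_key can locate a node in O(1) and restore order with a single sift-up. The class and method names mirror the record; everything else is illustrative, not a drop-in replacement.

class Node:
    def __init__(self, name, val):
        self.name, self.val = name, val

class MinHeap:
    def __init__(self, nodes):
        self.heap = list(nodes)
        self.idx_of_element = {n: i for i, n in enumerate(self.heap)}
        for i in range(len(self.heap) // 2 - 1, -1, -1):  # heapify bottom-up
            self._sift_down(i)

    def _swap(self, i, j):
        self.heap[i], self.heap[j] = self.heap[j], self.heap[i]
        self.idx_of_element[self.heap[i]] = i
        self.idx_of_element[self.heap[j]] = j

    def _sift_down(self, i):
        n = len(self.heap)
        while True:
            smallest = i
            for c in (2 * i + 1, 2 * i + 2):  # left and right children
                if c < n and self.heap[c].val < self.heap[smallest].val:
                    smallest = c
            if smallest == i:
                return
            self._swap(i, smallest)
            i = smallest

    def _sift_up(self, i):
        while i > 0 and self.heap[i].val < self.heap[(i - 1) // 2].val:
            self._swap(i, (i - 1) // 2)
            i = (i - 1) // 2

    def decrease_key(self, node, new_value):
        assert new_value < node.val, "new value must be less than current value"
        node.val = new_value
        self._sift_up(self.idx_of_element[node])  # O(log n) after O(1) lookup

b = Node("B", 6)
h = MinHeap([b, Node("A", 3), Node("X", 1)])
h.decrease_key(b, -17)
assert h.heap[0] is b  # b is now the minimum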
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowercase__( ): lowercase_ : str = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' lowercase_ : List[str] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('RGB' ) return image def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : Union[str, Any] = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : int = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = val def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): for i in range(config.vision_config.num_hidden_layers ): # read in 
original q and v biases lowercase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) lowercase_ : List[str] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict lowercase_ : Tuple = torch.cat((q_bias, torch.zeros_like(__SCREAMING_SNAKE_CASE , requires_grad=__SCREAMING_SNAKE_CASE ), v_bias) ) lowercase_ : str = qkv_bias def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): lowercase_ : str = 3_64 if 'coco' in model_name else 2_24 lowercase_ : int = BlipaVisionConfig(image_size=__SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowercase_ : Tuple = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict() elif "opt-6.7b" in model_name: lowercase_ : Optional[int] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict() elif "t5-xl" in model_name: lowercase_ : int = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase_ : Dict = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() lowercase_ : List[Any] = BlipaConfig(vision_config=__SCREAMING_SNAKE_CASE , text_config=__SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ): lowercase_ : Dict = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) lowercase_ : Any = tokenizer('\n' , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids[0] lowercase_ : Any = get_blipa_config(__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = BlipaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : str = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } lowercase_ : Any = model_name_to_original[model_name] # load original model print('Loading original model...' ) lowercase_ : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu' lowercase_ : List[str] = load_model_and_preprocess( name=__SCREAMING_SNAKE_CASE , model_type=__SCREAMING_SNAKE_CASE , is_eval=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) original_model.eval() print('Done!' 
) # update state dict keys lowercase_ : Dict = original_model.state_dict() lowercase_ : List[str] = create_rename_keys(__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase_ : Any = state_dict.pop(__SCREAMING_SNAKE_CASE ) if key.startswith('Qformer.bert' ): lowercase_ : Optional[Any] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: lowercase_ : Optional[int] = key.replace('self' , 'attention' ) if "opt_proj" in key: lowercase_ : List[Any] = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: lowercase_ : List[Any] = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): lowercase_ : Dict = key.replace('opt' , 'language' ) if key.startswith('t5' ): lowercase_ : Tuple = key.replace('t5' , 'language' ) lowercase_ : Union[str, Any] = val # read in qv biases read_in_q_v_bias(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = hf_model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) assert len(__SCREAMING_SNAKE_CASE ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowercase_ : Tuple = load_demo_image() lowercase_ : Any = vis_processors['eval'](__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE ) # create processor lowercase_ : List[Any] = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=__SCREAMING_SNAKE_CASE , image_std=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = BlipaProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values.to(__SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) original_model.to(__SCREAMING_SNAKE_CASE ) hf_model.to(__SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "opt" in model_name: lowercase_ : Any = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits lowercase_ : str = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).logits else: lowercase_ : str = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits lowercase_ : int = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) lowercase_ : Optional[Any] = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": lowercase_ : Union[str, Any] = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__SCREAMING_SNAKE_CASE ) assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": lowercase_ : Optional[int] = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__SCREAMING_SNAKE_CASE ) else: # cast to same type lowercase_ : List[Any] = logits.dtype assert torch.allclose(original_logits.to(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , atol=1E-2 ) print('Looks ok!' 
) print('Generating a caption...' ) lowercase_ : Union[str, Any] = '' lowercase_ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = original_model.generate({'image': original_pixel_values} ) lowercase_ : Any = hf_model.generate( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = input_ids.shape[1] lowercase_ : Optional[int] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = [text.strip() for text in output_text] print('HF generation:' , __SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() __SCREAMING_SNAKE_CASE =[ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
0
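The BLIP-2 record above follows a checkpoint-porting pattern that is easier to see without the obfuscation: build (old_key, new_key) pairs, pop and re-insert the tensors, then load with strict=False and assert on the leftover keys. A minimal sketch, using one key pair taken from the record and a dummy tensor of arbitrary shape in place of real weights:

import torch

def rename_key(state_dict, old, new):
    # pop under the old name, re-insert under the new one
    state_dict[new] = state_dict.pop(old)

state_dict = {"visual_encoder.cls_token": torch.zeros(1, 1, 8)}  # dummy shape
rename_keys = [("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")]
for src, dest in rename_keys:
    rename_key(state_dict, src, dest)
assert "vision_model.embeddings.class_embedding" in state_dict

# In the full script, loading then surfaces anything the mapping missed:
#   missing, unexpected = hf_model.load_state_dict(state_dict, strict=False)
#   assert len(missing) == 0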
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
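The combination_sum_iv record above ships three variants (plain recursion, memoized recursion, bottom-up table); the bottom-up one reads most clearly when de-obfuscated. Here dp[t] counts ordered sequences of elements of array summing to t, so the whole table costs O(target * len(array)). A sketch equivalent to the record's iterative version:

def combination_sum_iv(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: the empty sequence
    for t in range(1, target + 1):
        for item in array:
            if t - item >= 0:
                dp[t] += dp[t - item]  # extend every sequence reaching t - item
    return dp[target]

assert combination_sum_iv([1, 2, 5], 5) == 9  # same inputs as the record's demo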
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(__SCREAMING_SNAKE_CASE ) ) def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): # Base Case if index == len(__SCREAMING_SNAKE_CASE ): return True # Recursive Step for i in range(__SCREAMING_SNAKE_CASE ): if valid_coloring(graph[index] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # Color current vertex lowercase_ : Any = i # Validate coloring if util_color(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , index + 1 ): return True # Backtrack lowercase_ : Dict = -1 return False def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Optional[Any] = [-1] * len(__SCREAMING_SNAKE_CASE ) if util_color(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 0 ): return colored_vertices return []
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append next tokens to input_ids and attention mask lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask':
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
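De-obfuscated, the graph-coloring record above is plain backtracking over an adjacency matrix: try each of max_colors on the current vertex, recurse, and undo on failure. A self-contained restatement with an odd-cycle check as a usage example (the nested helper names here are illustrative):

def color(graph, max_colors):
    n = len(graph)
    colors = [-1] * n

    def safe(vertex, c):
        # no already-colored neighbour may share color c
        return all(not (adj and colors[u] == c) for u, adj in enumerate(graph[vertex]))

    def solve(vertex):
        if vertex == n:
            return True  # every vertex colored
        for c in range(max_colors):
            if safe(vertex, c):
                colors[vertex] = c
                if solve(vertex + 1):
                    return True
                colors[vertex] = -1  # backtrack
        return False

    return colors if solve(0) else []

# An odd cycle needs three colors:
cycle5 = [[1 if j in ((i - 1) % 5, (i + 1) % 5) else 0 for j in range(5)] for i in range(5)]
assert color(cycle5, 2) == []
assert color(cycle5, 3) != []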
"""simple docstring""" import cmath import math def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): lowercase_ : str = math.radians(__SCREAMING_SNAKE_CASE ) lowercase_ : int = math.radians(__SCREAMING_SNAKE_CASE ) # Convert voltage and current to rectangular form lowercase_ : Optional[int] = cmath.rect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = cmath.rect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
321
0
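A worked example for the apparent-power record above: both phasors are converted from polar form (magnitude, phase in degrees) to rectangular with cmath.rect, then multiplied. One hedge on the physics rather than the code: the record computes S = V * I, which adds the phase angles, while conventional complex power uses the current conjugate, S = V * conj(I), which subtracts them. The sketch below mirrors the record's convention.

import cmath
import math

def apparent_power(voltage, current, voltage_angle, current_angle):
    v = cmath.rect(voltage, math.radians(voltage_angle))  # polar -> rectangular
    i = cmath.rect(current, math.radians(current_angle))
    return v * i  # complex volt-amperes, as in the record

s = apparent_power(100, 5, 0, 0)
assert abs(s - 500) < 1e-9  # in-phase phasors: 500 VA, purely real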
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __SCREAMING_SNAKE_CASE ={ "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __SCREAMING_SNAKE_CASE ={ "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BartTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> int: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase 
,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : str = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Optional[Any] = add_prefix_space lowercase_ : str = pre_tok_class(**__UpperCamelCase ) lowercase_ : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase_ : int = 'post_processor' lowercase_ : Any = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : List[str] = tuple(state['cls'] ) lowercase_ : Optional[Any] = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : List[str] = add_prefix_space lowercase_ : List[Any] = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : List[str] = trim_offsets lowercase_ : List[str] = True if changes_to_apply: lowercase_ : str = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : List[Any] = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : int = kwargs.get('is_split_into_words' ,__UpperCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.'
) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Optional[Any] = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Tuple = [self.sep_token_id] lowercase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
367
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
0
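The DisjunctiveConstraint tests above boil down to a small state machine: the constraint holds several allowed token sequences, update(token_id) returns a (stepped, completed, reset) triple as generation feeds it ids, and completed flips once any one branch is fully matched. A usage sketch built only from behavior the tests themselves assert (requires transformers and torch, as the record does):

from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
assert dc.completed              # the [1, 2, 4] branch was satisfied
assert dc.current_seq == [1, 2, 4]
dc.reset()                       # back to the start, ready to match again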
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE ={ "configuration_xlm_roberta_xl": [ "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", "XLMRobertaXLForQuestionAnswering", "XLMRobertaXLForSequenceClassification", "XLMRobertaXLForTokenClassification", "XLMRobertaXLModel", "XLMRobertaXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
368
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done successfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
0
"""simple docstring""" import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ): if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' ) if tokenizer_name is None: lowercase_ : str = TOKENIZER_CLASSES else: lowercase_ : int = {tokenizer_name: getattr(__SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )} logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' ) for tokenizer_name in tokenizer_names: lowercase_ : Dict = TOKENIZER_CLASSES[tokenizer_name] lowercase_ : Union[str, Any] = True if checkpoint_name is None: lowercase_ : Tuple = list(tokenizer_class.max_model_input_sizes.keys() ) else: lowercase_ : List[Any] = [checkpoint_name] logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' ) for checkpoint in checkpoint_names: logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' ) # Load tokenizer lowercase_ : List[Any] = tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE ) # Save fast tokenizer logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' ) # For organization names we create sub-directories if "/" in checkpoint: lowercase_ : List[str] = checkpoint.split('/' ) lowercase_ : Dict = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif add_prefix: lowercase_ : List[str] = checkpoint lowercase_ : Optional[Any] = dump_path else: lowercase_ : int = None lowercase_ : str = dump_path logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: lowercase_ : int = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] lowercase_ : List[str] = file_path.split(__SCREAMING_SNAKE_CASE )[-1][0] if next_char == "/": lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = None logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) lowercase_ : Optional[int] = tokenizer.save_pretrained( __SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE , filename_prefix=__SCREAMING_SNAKE_CASE ) logger.info(F'''=> File names {file_names}''' ) for file_name in file_names: if not file_name.endswith('tokenizer.json' ): os.remove(__SCREAMING_SNAKE_CASE ) logger.info(F'''=> removing {file_name}''' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will " "download and convert all the checkpoints from AWS." 
), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
369
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
0
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class UpperCamelCase ( lowercase_ ): lowercase = 4_2 lowercase = 4_2 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
370
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
0
"""simple docstring""" from __future__ import annotations from random import random class UpperCamelCase : def __init__( self ,__UpperCamelCase = None ) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = value lowercase_ : str = random() lowercase_ : Node | None = None lowercase_ : Node | None = None def __repr__( self ) -> str: '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return f'''\'{self.value}: {self.prior:.5}\'''' else: return pformat( {f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 ) def __str__( self ) -> str: '''simple docstring''' lowercase_ : List[str] = str(self.value ) + ' ' lowercase_ : Any = str(self.left or '' ) lowercase_ : List[str] = str(self.right or '' ) return value + left + right def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : int ): if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: lowercase_ : Any = split(root.left , __SCREAMING_SNAKE_CASE ) return left, root else: lowercase_ : Union[str, Any] = split(root.right , __SCREAMING_SNAKE_CASE ) return root, right def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : Node | None ): if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: lowercase_ : int = merge(left.right , __SCREAMING_SNAKE_CASE ) return left else: lowercase_ : List[str] = merge(__SCREAMING_SNAKE_CASE , right.left ) return right def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Any = Node(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return merge(merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Optional[Any] = split(__SCREAMING_SNAKE_CASE , value - 1 ) lowercase_ : str = split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None ): if not root: # None return else: inorder(root.left ) print(root.value , end=',' ) inorder(root.right ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : str ): for arg in args.split(): if arg[0] == "+": lowercase_ : Union[str, Any] = insert(__SCREAMING_SNAKE_CASE , int(arg[1:] ) ) elif arg[0] == "-": lowercase_ : Union[str, Any] = erase(__SCREAMING_SNAKE_CASE , int(arg[1:] ) ) else: print('Unknown command' ) return root def lowercase__( ): lowercase_ : Optional[int] = None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) lowercase_ : Dict = input() while args != "q": lowercase_ : Union[str, Any] = interact_treap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
371
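A minimal usage sketch of the treap helpers above (assumes insert, erase, and inorder from that file are in scope; this example is not part of the original source):

root = None
for value in (5, 3, 8, 1):
    root = insert(root, value)
inorder(root)          # prints: 1,3,5,8,
root = erase(root, 3)  # removes every node holding 3
inorder(root)          # prints: 1,5,8,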
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = RoFormerTokenizer lowercase = RoFormerTokenizerFast lowercase = True lowercase = True def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' super().setUp() def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = '永和服装饰品有限公司,今天天气非常好' lowercase_ : int = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = self.get_tokenizer() lowercase_ : str = self.get_chinese_input_output_texts() lowercase_ : Any = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,output_text.split() ) lowercase_ : Optional[int] = tokens + [tokenizer.unk_token] lowercase_ : Union[str, Any] = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = self.get_rust_tokenizer() lowercase_ : int = self.get_chinese_input_output_texts() lowercase_ : Union[str, Any] = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,output_text.split() ) lowercase_ : str = tokens + [tokenizer.unk_token] lowercase_ : Optional[Any] = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' pass
350
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and save as a list of matrices lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to one dimension lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Learning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the summed error of a single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Completed---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
321
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json", # See all XGLM models at https://huggingface.co/models?filter=xglm } class UpperCamelCase ( lowercase_ ): lowercase = 'xglm' lowercase = ['past_key_values'] lowercase = { 'num_attention_heads': 'attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'num_layers', } def __init__( self ,__UpperCamelCase=25_6008 ,__UpperCamelCase=2048 ,__UpperCamelCase=1024 ,__UpperCamelCase=4096 ,__UpperCamelCase=24 ,__UpperCamelCase=16 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=2 ,__UpperCamelCase=1 ,__UpperCamelCase=0 ,__UpperCamelCase=2 ,**__UpperCamelCase ,) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = vocab_size lowercase_ : Optional[Any] = max_position_embeddings lowercase_ : Tuple = d_model lowercase_ : int = ffn_dim lowercase_ : Tuple = num_layers lowercase_ : Optional[int] = attention_heads lowercase_ : Dict = activation_function lowercase_ : Any = dropout lowercase_ : Dict = attention_dropout lowercase_ : Union[str, Any] = activation_dropout lowercase_ : Dict = layerdrop lowercase_ : Dict = init_std lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True lowercase_ : Dict = use_cache super().__init__( pad_token_id=__UpperCamelCase ,bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,decoder_start_token_id=__UpperCamelCase ,**__UpperCamelCase ,)
351
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
0
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __SCREAMING_SNAKE_CASE =" \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir ,'schedulers/' ) ) lowercase_ : List[Any] = self.diffusers_dir shutil.copy( os.path.join(__UpperCamelCase ,'src/diffusers/schedulers/scheduling_ddpm.py' ) ,os.path.join(self.diffusers_dir ,'schedulers/scheduling_ddpm.py' ) ,) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : str = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: lowercase_ : Any = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result lowercase_ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ) lowercase_ : List[Any] = black.format_str(__UpperCamelCase ,mode=__UpperCamelCase ) lowercase_ : Dict = os.path.join(self.diffusers_dir ,'new_code.py' ) with open(__UpperCamelCase ,'w' ,newline='\n' ) as f: f.write(__UpperCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name ,overwrite=__UpperCamelCase ) with open(__UpperCamelCase ,'r' ) as f: self.assertTrue(f.read() ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,REFERENCE_CODE + '\n' ,) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,__UpperCamelCase ,) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,re.sub('DDPM' ,'Test' ,__UpperCamelCase ) ,) # Copy consistency 
with a really long name lowercase_ : List[Any] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' ,f'''{long_class_name}SchedulerOutput''' ,re.sub('Bert' ,__UpperCamelCase ,__UpperCamelCase ) ,) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,__UpperCamelCase ,overwrite_result=re.sub('DDPM' ,'Test' ,__UpperCamelCase ) ,)
352
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
321
0
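The EnCodec-style feature extractor above derives its chunking from two settings: chunk_length = int(chunk_length_s * sampling_rate) and chunk_stride = max(1, int((1.0 - overlap) * chunk_length)). A quick numeric sanity check (a sketch with made-up settings, not values from the original file):

sampling_rate = 24_000  # Hz
chunk_length_s = 1.0    # seconds per chunk
overlap = 0.25          # fraction of each chunk shared with the next

chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 18000 samples between chunk starts
assert (chunk_length, chunk_stride) == (24_000, 18_000)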
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def lowercase__( __SCREAMING_SNAKE_CASE : BertModel , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value') lowercase_ : int = ( ('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'), ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): os.makedirs(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = model.state_dict() def to_tf_var_name(__SCREAMING_SNAKE_CASE : str ): for patt, repl in iter(__SCREAMING_SNAKE_CASE ): lowercase_ : List[Any] = name.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return F'''bert/{name}''' def create_tf_var(__SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : tf.Session ): lowercase_ : Union[str, Any] = tf.dtypes.as_dtype(tensor.dtype ) lowercase_ : Optional[int] = tf.get_variable(dtype=__SCREAMING_SNAKE_CASE , shape=tensor.shape , name=__SCREAMING_SNAKE_CASE , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__SCREAMING_SNAKE_CASE ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: lowercase_ : Tuple = to_tf_var_name(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): lowercase_ : Optional[int] = torch_tensor.T lowercase_ : str = create_tf_var(tensor=__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE , session=__SCREAMING_SNAKE_CASE ) tf.keras.backend.set_value(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = session.run(__SCREAMING_SNAKE_CASE ) print(F'''Successfully created {tf_name}: {np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}''' ) lowercase_ : Optional[Any] = tf.train.Saver(tf.trainable_variables() ) saver.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , model_name.replace('-' , '_' ) + '.ckpt' ) ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any]=None ): lowercase_ : Tuple = argparse.ArgumentParser() parser.add_argument('--model_name' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='model name e.g. bert-base-uncased' ) parser.add_argument( '--cache_dir' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='Directory containing pytorch model' ) parser.add_argument('--pytorch_model_path' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='/path/to/<pytorch-model-name>.bin' ) parser.add_argument('--tf_cache_dir' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='Directory in which to save tensorflow model' ) lowercase_ : Optional[int] = parser.parse_args(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__SCREAMING_SNAKE_CASE , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
353
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
0
__SCREAMING_SNAKE_CASE =[0, 2, 4, 6, 8] __SCREAMING_SNAKE_CASE =[1, 3, 5, 7, 9] def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowercase_ : Optional[int] = 0 for digit in range(10 ): lowercase_ : List[str] = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return result lowercase_ : Dict = 0 for digita in range(10 ): lowercase_ : Tuple = digita if (remainder + digita) % 2 == 0: lowercase_ : Dict = ODD_DIGITS else: lowercase_ : Any = EVEN_DIGITS for digita in other_parity_digits: lowercase_ : Union[str, Any] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) return result def lowercase__( __SCREAMING_SNAKE_CASE : int = 9 ): lowercase_ : List[Any] = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__SCREAMING_SNAKE_CASE , 0 , [0] * length , __SCREAMING_SNAKE_CASE ) return result if __name__ == "__main__": print(F"{solution() = }")
354
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
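# Hedged end-to-end sketch of the feature extractor above (EnCodec-style).
# The class and argument values are assumptions for illustration; the contract
# shown (mono float32 audio in, a padded 'input_values' batch out) follows the
# __call__ implementation.
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
audio = np.random.randn(24_000).astype(np.float32)  # one second of mono audio
inputs = extractor(audio, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch, channels, padded_length)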
355
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
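# The fused-attention split performed by read_in_q_k_v above, shown on a
# dummy tensor: timm-style checkpoints store the query, key and value
# projections stacked in one (3 * hidden, hidden) matrix, which the script
# slices back apart with exactly these index ranges.
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(
    3 * hidden, hidden
)
query = in_proj_weight[:hidden, :]
key = in_proj_weight[hidden : hidden * 2, :]
value = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)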
321
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=3 ,__UpperCamelCase=224 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,) -> int: '''simple docstring''' lowercase_ : List[str] = size if size is not None else {'height': 18, 'width': 18} lowercase_ : Optional[int] = parent lowercase_ : Dict = batch_size lowercase_ : Any = num_channels lowercase_ : Optional[int] = image_size lowercase_ : List[str] = min_resolution lowercase_ : Dict = max_resolution lowercase_ : Tuple = do_resize lowercase_ : List[str] = size lowercase_ : int = do_normalize lowercase_ : List[str] = image_mean lowercase_ : Any = image_std def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = ViTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = EfficientFormerImageProcessorTester(self ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase_ : Any = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,Image.Image ) # Test not batched input lowercase_ : Union[str, Any] = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) ,) # Test batched lowercase_ : Dict = image_processor(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) ,) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Any = 
prepare_image_inputs(self.image_proc_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,np.ndarray ) # Test not batched input lowercase_ : Any = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) ,) # Test batched lowercase_ : List[str] = image_processor(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Optional[Any] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,torch.Tensor ) # Test not batched input lowercase_ : Tuple = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) ,) # Test batched lowercase_ : Any = image_processor(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ) ,)
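# What the three test paths (PIL, numpy, torch) assert, distilled: whatever
# the input type, pixel_values comes back as (batch, channels, height, width).
# A hedged sketch; the image sizes are illustrative and processor argument
# handling may vary across library versions.
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
images = [np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8) for _ in range(2)]
out = processor(images, return_tensors="np")
print(out["pixel_values"].shape)  # (2, 3, 18, 18)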
356
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
0
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE ={ "bart": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), "bert": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-cased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-base-cased-finetuned-mrpc": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "dpr": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), "gpt2": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlnet": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm-roberta": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "transfo-xl": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "openai-gpt": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "layoutlm": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), "roberta-large-mnli": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "camembert": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "flaubert": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert-base-distilled-squad": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert-visual-feature-encoder": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 
"ctrl": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "albert": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "t5": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "electra": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "wav2vec2": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Tuple=True ): if model_type not in MODEL_CLASSES: raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase_ : Tuple = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase_ : Optional[Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) lowercase_ : int = config_class.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = True lowercase_ : int = True print(F'''Building TensorFlow model from configuration: {config}''' ) lowercase_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase_ : Union[str, Any] = cached_file( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase_ : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if compare_with_pt_model: lowercase_ : Optional[Any] = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network lowercase_ : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' ) lowercase_ : str = pt_model_class.from_pretrained( pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE ) with torch.no_grad(): lowercase_ : List[Any] = pt_model(**pt_model.dummy_inputs ) lowercase_ : Optional[int] = pto[0].numpy() lowercase_ : Optional[Any] = tfo[0].numpy() lowercase_ : str = np.amax(np.abs(np_pt - np_tf ) ) print(F'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(F'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format='h5' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Dict=False , ): if args_model_type is None: lowercase_ : Dict = list(MODEL_CLASSES.keys() ) else: lowercase_ : Optional[int] = [args_model_type] for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ): print('=' * 1_00 ) print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' ) print('=' * 1_00 ) if model_type not in MODEL_CLASSES: raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase_ : List[Any] = MODEL_CLASSES[model_type] if 
model_shortcut_names_or_path is None: lowercase_ : str = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase_ : Any = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ): print('-' * 1_00 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase_ : Any = model_shortcut_name elif only_convert_finetuned_models: print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' ) print('-' * 1_00 ) if config_shortcut_name in aws_config_map: lowercase_ : Dict = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) else: lowercase_ : Optional[int] = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase_ : Optional[int] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) else: lowercase_ : Any = model_shortcut_name if os.path.isfile(__SCREAMING_SNAKE_CASE ): lowercase_ : Union[str, Any] = 'converted_model' convert_pt_checkpoint_to_tf( model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , ) if remove_cached_files: os.remove(__SCREAMING_SNAKE_CASE ) os.remove(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file." ) parser.add_argument( "--model_type", default=None, type=str, help=( F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and " "convert all the models from AWS." ), ) parser.add_argument( "--pytorch_checkpoint_path", default=None, type=str, help=( "Path to the PyTorch checkpoint path or shortcut name to download from AWS. " "If not given, will download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--config_file", default=None, type=str, help=( "The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture. If not given and " "--pytorch_checkpoint_path is not given or is a shortcut name " "use the configuration associated to the shortcut name on the AWS" ), ) parser.add_argument( "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions." 
) parser.add_argument( "--use_cached_models", action="store_true", help="Use cached models if possible instead of updating to latest checkpoint versions.", ) parser.add_argument( "--remove_cached_files", action="store_true", help="Remove pytorch models after conversion (save memory when converting in batches).", ) parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") __SCREAMING_SNAKE_CASE =parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
357
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
0
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
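# Hedged usage sketch of the fast tokenizer above. Fetching the 3B
# checkpoint's tokenizer files requires network access; note that
# build_inputs_with_special_tokens only appends the EOS token, matching
# Blenderbot's single-sequence input format.
from transformers import BlenderbotTokenizerFast

tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids
print(tokenizer.decode(ids))  # decoded text ends with the </s> EOS marker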
358
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", "SwinPreTrainedModel", "SwinBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
359
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
0
"""simple docstring""" import math def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if initial_intensity < 0: raise ValueError('The value of intensity cannot be negative' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__SCREAMING_SNAKE_CASE ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="malus_law")
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
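# A minimal sketch of calling the same utilities directly, outside unittest.
# It assumes (as the test module above does) that the repo's utils/ directory
# has been appended to sys.path so that get_test_info is importable.
import os

import get_test_info

bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
mapping = get_test_info.get_test_to_tester_mapping(bert_test_file)
# Expected, per the assertions above: {"BertModelTest": "BertModelTester"}
print(get_test_info.to_json(mapping))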
321
0
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' raise NotImplementedError() def _UpperCAmelCase ( self ) -> int: '''simple docstring''' raise NotImplementedError() class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase = False ,**__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Any = tokenizer lowercase_ : List[Any] = skip_prompt lowercase_ : Optional[Any] = decode_kwargs # variables used in the streaming process lowercase_ : Tuple = [] lowercase_ : List[Any] = 0 lowercase_ : Tuple = True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('TextStreamer only supports batch size 1' ) elif len(value.shape ) > 1: lowercase_ : int = value[0] if self.skip_prompt and self.next_tokens_are_prompt: lowercase_ : str = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) lowercase_ : Optional[int] = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('\n' ): lowercase_ : Optional[int] = text[self.print_len :] lowercase_ : str = [] lowercase_ : Tuple = 0 # If the last token is a CJK character, we print the characters. elif len(__UpperCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): lowercase_ : str = text[self.print_len :] self.print_len += len(__UpperCamelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: lowercase_ : List[Any] = text[self.print_len : text.rfind(' ' ) + 1] self.print_len += len(__UpperCamelCase ) self.on_finalized_text(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if len(self.token_cache ) > 0: lowercase_ : Any = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) lowercase_ : Optional[int] = text[self.print_len :] lowercase_ : Union[str, Any] = [] lowercase_ : Dict = 0 else: lowercase_ : Union[str, Any] = '' lowercase_ : Optional[int] = True self.on_finalized_text(__UpperCamelCase ,stream_end=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ) -> Union[str, Any]: '''simple docstring''' print(__UpperCamelCase ,flush=__UpperCamelCase ,end='' if not stream_end else None ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase = False ,__UpperCamelCase = None ,**__UpperCamelCase ) -> str: '''simple docstring''' super().__init__(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Tuple = Queue() lowercase_ : List[str] = None lowercase_ : Union[str, Any] = timeout def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ) -> List[str]: '''simple docstring''' self.text_queue.put(__UpperCamelCase ,timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal ,timeout=self.timeout ) def __iter__( self ) -> Union[str, Any]: '''simple docstring''' return self def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
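# A short usage sketch for the two streamer classes defined above. The model
# and tokenizer names are illustrative; any causal LM that supports
# `generate(..., streamer=...)` works the same way.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TextStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")

# Blocking variant: decoded text is printed to stdout as tokens are generated.
streamer = TextStreamer(tok, skip_prompt=True)
model.generate(**inputs, streamer=streamer, max_new_tokens=20)

# Non-blocking variant: generation runs in a thread while the streamer is iterated.
iterator_streamer = TextIteratorStreamer(tok, skip_prompt=True)
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=iterator_streamer, max_new_tokens=20))
thread.start()
for new_text in iterator_streamer:
    print(new_text, end="")
thread.join()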
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Sum the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to the sum of the factorials of their digits."""
    # 7 * 9! + 1 is a safe upper bound: a number with 8 or more digits can never
    # equal its digit-factorial sum, since even 8 * 9! has only 7 digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
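# Quick sanity check: 145 is a known "factorion", since
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so solution() must count it.
assert sum_of_digit_factorial(145) == 145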
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
        '''Decrease a node's value to new_value, which must be smaller than the current value.'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate a Min-Heap

# Generating Min-Heap from an array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
321
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
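# A hedged end-to-end sketch of the processor these tests exercise, paired with
# the public CLIPSeg checkpoint (the model name is an assumption, not part of
# the tests above):
import requests
from PIL import Image

from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One (text, image) pair per prompt; logits hold a low-resolution mask per pair.
inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")
outputs = model(**inputs)
print(outputs.logits.shape)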
321
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE ={ "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MegatronBertForCausalLM", "MegatronBertForMaskedLM", "MegatronBertForMultipleChoice", "MegatronBertForNextSentencePrediction", "MegatronBertForPreTraining", "MegatronBertForQuestionAnswering", "MegatronBertForSequenceClassification", "MegatronBertForTokenClassification", "MegatronBertModel", "MegatronBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : int = 1 for i in range(1 , num + 1 ): fact *= i return fact def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = 0 while number > 0: lowercase_ : List[str] = number % 10 sum_of_digits += last_digit lowercase_ : str = number // 10 # Removing the last_digit from the given number return sum_of_digits def lowercase__( __SCREAMING_SNAKE_CASE : int = 1_00 ): lowercase_ : Tuple = factorial(__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = split_and_add(__SCREAMING_SNAKE_CASE ) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
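# A hedged sketch of the intended use of these classes, mirroring the
# BertGeneration documentation: pairing the encoder and decoder into a
# seq2seq model with EncoderDecoderModel.
from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel

checkpoint = "google/bert_for_seq_generation_L-24_bbc_encoder"
encoder = BertGenerationEncoder.from_pretrained(checkpoint, bos_token_id=101, eos_token_id=102)
# The decoder needs causal masking and cross-attention enabled.
decoder = BertGenerationDecoder.from_pretrained(
    checkpoint, add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102
)
bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder)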
321
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ="▁" __SCREAMING_SNAKE_CASE ={"vocab_file": "sentencepiece.bpe.model"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } __SCREAMING_SNAKE_CASE ={ "facebook/mbart-large-50-one-to-many-mmt": 1024, } # fmt: off __SCREAMING_SNAKE_CASE =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = ['input_ids', 'attention_mask'] lowercase = [] lowercase = [] def __init__( self ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : Union[str, Any] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token lowercase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs lowercase_ : Dict = kwargs.get('additional_special_tokens' ,[] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase ,eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,) lowercase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) lowercase_ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowercase_ : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase_ : Dict = 1 lowercase_ : Dict = len(self.sp_model ) lowercase_ : Any = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCamelCase ) } lowercase_ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()} lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowercase_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowercase_ : Tuple = src_lang if src_lang is not None else 'en_XX' lowercase_ : Tuple = self.lang_code_to_id[self._src_lang] lowercase_ : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Any = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Dict: '''simple docstring''' lowercase_ : str = self.__dict__.copy() lowercase_ : Optional[int] = None return state def __setstate__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Optional[Any] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): lowercase_ : Optional[Any] = {} lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ : List[Any] = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : List[Any] = [] lowercase_ : str = '' lowercase_ : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCamelCase ) + token lowercase_ : List[Any] = True lowercase_ : List[str] = [] else: current_sub_tokens.append(__UpperCamelCase ) lowercase_ : List[str] = False out_string += self.sp_model.decode(__UpperCamelCase ) return out_string.strip() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: 
'''simple docstring''' if not os.path.isdir(__UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : List[Any] = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase ,'wb' ) as fi: lowercase_ : int = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase ) lowercase_ : str = [1] * len(self.prefix_tokens ) lowercase_ : Union[str, Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Tuple: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) lowercase_ : Optional[int] = src_lang lowercase_ : Union[str, Any] = self(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,return_tensors=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Dict = self.convert_tokens_to_ids(__UpperCamelCase ) lowercase_ : List[str] = tgt_lang_id return inputs def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = "en_XX" ,__UpperCamelCase = None ,__UpperCamelCase = "ro_RO" ,**__UpperCamelCase ,) -> BatchEncoding: '''simple docstring''' lowercase_ : List[Any] = src_lang lowercase_ : Union[str, Any] = tgt_lang return super().prepare_seqaseq_batch(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Dict = self.lang_code_to_id[src_lang] lowercase_ : Tuple = [self.cur_lang_code_id] lowercase_ : List[Any] = [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = self.lang_code_to_id[tgt_lang] lowercase_ : Any = [self.cur_lang_code_id] lowercase_ : int = [self.eos_token_id]
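# A short sketch of the prefix/suffix behaviour implemented above: with
# src_lang set, encoded sequences start with the language-code token and end
# with </s>. The checkpoint name matches the vocab map defined earlier in the
# file.
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tokenizer("UN Chief Says There Is No Military Solution in Syria").input_ids
assert ids[0] == tokenizer.lang_code_to_id["en_XX"] and ids[-1] == tokenizer.eos_token_id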
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
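# The same export path the tests drive, as a standalone sketch (output path and
# opset are illustrative; convert() expects the output folder to be new or empty):
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

output = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=output, opset=12)

# Optional dynamic quantization, as exercised by the size-regression tests above.
quantized_path = quantize(output)
print(quantized_path)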
321
0
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ) -> Dict: print('Loading config file...' ) def flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]="" , __SCREAMING_SNAKE_CASE : Union[str, Any]="." ): lowercase_ : Optional[Any] = [] for k, v in d.items(): lowercase_ : str = parent_key + sep + k if parent_key else k if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sep=__SCREAMING_SNAKE_CASE ).items() ) else: items.append((new_key, v) ) return dict(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = argparse.Namespace() with open(__SCREAMING_SNAKE_CASE , 'r' ) as yaml_file: try: lowercase_ : int = yaml.load(__SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader ) lowercase_ : List[str] = flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE ) for k, v in flat_cfg.items(): setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE ) ) ) return config def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: lowercase_ : Optional[Any] = MobileViTVaConfig() lowercase_ : Optional[int] = False # dataset if task_name.startswith('imagenet1k_' ): lowercase_ : int = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase_ : Optional[Any] = 3_84 else: lowercase_ : Dict = 2_56 lowercase_ : List[Any] = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): lowercase_ : Optional[Any] = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase_ : Tuple = 3_84 else: lowercase_ : List[Any] = 2_56 lowercase_ : Optional[int] = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): lowercase_ : Tuple = 1_51 lowercase_ : str = 5_12 lowercase_ : Optional[int] = 'ade20k-id2label.json' lowercase_ : Optional[int] = True elif task_name.startswith('voc_' ): lowercase_ : Any = 21 lowercase_ : Optional[Any] = 5_12 lowercase_ : Dict = 'pascal-voc-id2label.json' lowercase_ : Tuple = True # orig_config lowercase_ : List[str] = load_orig_config_file(__SCREAMING_SNAKE_CASE ) assert getattr(__SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" lowercase_ : int = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowercase_ : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowercase_ : List[str] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: lowercase_ : Optional[int] = 
getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) lowercase_ : int = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 ) lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label lowercase_ : List[str] = 'huggingface/label-files' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Any = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : int = idalabel lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()} return config def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ) -> Any: lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = val def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> Tuple: if base_model: lowercase_ : Dict = '' else: lowercase_ : Optional[int] = 'mobilevitv2.' lowercase_ : Optional[int] = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowercase_ : List[Any] = k[8:] else: lowercase_ : List[Any] = k if ".block." in k: lowercase_ : Optional[Any] = k_new.replace('.block.' , '.' ) if ".conv." in k: lowercase_ : List[str] = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: lowercase_ : List[Any] = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: lowercase_ : str = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if F'''layer_{i}.''' in k: lowercase_ : List[Any] = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowercase_ : Dict = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: lowercase_ : Any = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if F'''layer_{i}.0.''' in k: lowercase_ : Union[str, Any] = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if F'''layer_{i}.1.local_rep.0.''' in k: lowercase_ : int = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if F'''layer_{i}.1.local_rep.1.''' in k: lowercase_ : Any = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowercase_ : Dict = [0, 1] elif i == 4: lowercase_ : Optional[Any] = [0, 1, 2, 3] elif i == 5: lowercase_ : List[str] = [0, 1, 2] for j in j_in: if F'''layer_{i}.1.global_rep.{j}.''' in k: lowercase_ : int = k_new.replace( F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if F'''layer_{i}.1.global_rep.{j+1}.''' in k: lowercase_ : List[str] = k_new.replace( F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if F'''layer_{i}.1.conv_proj.''' in k: lowercase_ : Optional[int] = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowercase_ : Any = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: lowercase_ : Optional[Any] = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: lowercase_ : Union[str, Any] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." 
in k: lowercase_ : Optional[Any] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: lowercase_ : Dict = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: lowercase_ : str = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: lowercase_ : Optional[Any] = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: lowercase_ : List[str] = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: lowercase_ : Optional[Any] = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Dict ) -> Dict: lowercase_ : Optional[int] = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__SCREAMING_SNAKE_CASE ) for k in keys_to_ignore: state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( ) -> Optional[Any]: lowercase_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowercase_ : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ) -> Any: lowercase_ : int = get_mobilevitva_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # load original state_dict lowercase_ : List[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): lowercase_ : str = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : Optional[Any] = False else: lowercase_ : Tuple = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : int = False # remove and rename some keys of load the original model lowercase_ : Tuple = checkpoint remove_unused_keys(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = create_rename_keys(__SCREAMING_SNAKE_CASE , base_model=__SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # load modified state_dict model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase_ : List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase_ : Dict = image_processor(images=prepare_img() , return_tensors='pt' ) lowercase_ : str = model(**__SCREAMING_SNAKE_CASE ) # verify classification model if task_name.startswith('imagenet' ): lowercase_ : int = outputs.logits lowercase_ : Any = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant lowercase_ : Optional[int] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": 
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
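A hedged invocation sketch of the converter defined above; the checkpoint and config paths are placeholders, and the output directory is created by the function itself:

# CLI equivalent (flag names taken from the argparse block above):
#   --task imagenet1k_256 --orig_checkpoint_path mobilevitv2-1.0.pt \
#   --orig_config_path mobilevitv2.yaml --pytorch_dump_folder_path ./mobilevitv2-hf
convert_mobilevitva_checkpoint(
    'imagenet1k_256',      # task the checkpoint was trained on
    'mobilevitv2-1.0.pt',  # placeholder: original state dict (.pt)
    'mobilevitv2.yaml',    # placeholder: original YAML config
    './mobilevitv2-hf',    # output folder for model + image processor
)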
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __SCREAMING_SNAKE_CASE ={ "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =["ConditionalDetrFeatureExtractor"] __SCREAMING_SNAKE_CASE =["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
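Likewise for this converter, a hedged call sketch with placeholder paths:

# CLI equivalent (flag names taken from the argparse block above):
#   --tf_checkpoint_path ckpt/model.ckpt --bert_config_file bert_config.json \
#   --pytorch_dump_path ./bert-token-dropping
convert_checkpoint_to_pytorch('ckpt/model.ckpt', 'bert_config.json', './bert-token-dropping')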
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ): if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release: # old versions of hfh don't url-encode the file path lowercase_ : int = quote(__SCREAMING_SNAKE_CASE ) return hfh.hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' , revision=__SCREAMING_SNAKE_CASE )
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : str ): """simple docstring""" lowercase_ : List[Any] = hex_num.strip() if not hex_num: raise ValueError('No value was passed to the function' ) lowercase_ : Dict = hex_num[0] == '-' if is_negative: lowercase_ : str = hex_num[1:] try: lowercase_ : Any = int(__SCREAMING_SNAKE_CASE , 16 ) except ValueError: raise ValueError('Invalid value was passed to the function' ) lowercase_ : List[Any] = '' while int_num > 0: lowercase_ : Optional[int] = str(int_num % 2 ) + bin_str int_num >>= 1 return int(('-' + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : int = generate_pascal_triangle(__SCREAMING_SNAKE_CASE ) for row_idx in range(__SCREAMING_SNAKE_CASE ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=' ' ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=' ' ) else: print(triangle[row_idx][col_idx] , end='' ) print() def lowercase__( __SCREAMING_SNAKE_CASE : int ): if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) lowercase_ : list[list[int]] = [] for current_row_idx in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = populate_current_row(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) triangle.append(__SCREAMING_SNAKE_CASE ) return triangle def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Optional[int] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 lowercase_ : Optional[int] = 1, 1 for current_col_idx in range(1 , __SCREAMING_SNAKE_CASE ): calculate_current_element( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return current_row def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , ): lowercase_ : Tuple = triangle[current_row_idx - 1][current_col_idx - 1] lowercase_ : int = triangle[current_row_idx - 1][current_col_idx] lowercase_ : Any = above_to_left_elt + above_to_right_elt def lowercase__( __SCREAMING_SNAKE_CASE : int ): if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) lowercase_ : list[list[int]] = [[1]] for row_index in range(1 , __SCREAMING_SNAKE_CASE ): lowercase_ : Optional[int] = [0] + result[-1] + [0] lowercase_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row lowercase_ : Optional[int] = sum(divmod(__SCREAMING_SNAKE_CASE , 2 ) ) lowercase_ : int = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] lowercase_ : List[Any] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() lowercase_ : List[Any] = row_first_half + row_second_half result.append(__SCREAMING_SNAKE_CASE ) return result def lowercase__( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None: lowercase_ : int = F'''{func.__name__}({value})''' lowercase_ : List[str] = timeit(F'''__main__.{call}''' , setup='import __main__' ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
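The integration tests above pin exact logits; everyday use of the same checkpoint looks more like this sketch (the protein sequence is a toy placeholder):

import torch
from transformers import AutoTokenizer, EsmModel

tokenizer = AutoTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D').eval()

inputs = tokenizer('MKTAYIAKQR', return_tensors='pt')  # toy protein sequence
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, hidden_size)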
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) class UpperCamelCase : def __init__( self ) -> Tuple: '''simple docstring''' lowercase_ : Dict = False def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' if not self.initialized: lowercase_ : Optional[Any] = RagRetriever( __UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,index=__UpperCamelCase ,init_retrieval=__UpperCamelCase ,) lowercase_ : Dict = True def _UpperCAmelCase ( self ) -> int: '''simple docstring''' self.retriever.index.init_index() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = self.retriever._main_retrieve(__UpperCamelCase ,__UpperCamelCase ) return doc_ids, retrieved_doc_embeds class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> Optional[int]: '''simple docstring''' if index is not None and index.is_initialized() and len(__UpperCamelCase ) > 0: raise ValueError( 'When using Ray for distributed fine-tuning, ' 'you\'ll need to provide the paths instead, ' 'as the dataset and the index are loaded ' 'separately. More info in examples/rag/use_own_knowledge_dataset.py ' ) super().__init__( __UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,index=__UpperCamelCase ,init_retrieval=__UpperCamelCase ,) lowercase_ : List[Any] = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) for worker in self.retrieval_workers ] ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' logger.info('initializing retrieval' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
lowercase_ : Any = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )] lowercase_ : Optional[int] = ray.get(random_worker.retrieve.remote(__UpperCamelCase ,__UpperCamelCase ) ) else: lowercase_ : int = self._main_retrieve(__UpperCamelCase ,__UpperCamelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCamelCase ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return super(__UpperCamelCase ,cls ).get_tokenizers(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Tuple = kwargs.pop('config' ,__UpperCamelCase ) or RagConfig.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[Any] = RagTokenizer.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase ) lowercase_ : str = rag_tokenizer.question_encoder lowercase_ : Dict = rag_tokenizer.generator if indexed_dataset is not None: lowercase_ : Union[str, Any] = 'custom' lowercase_ : List[Any] = CustomHFIndex(config.retrieval_vector_size ,__UpperCamelCase ) else: lowercase_ : Union[str, Any] = cls._build_index(__UpperCamelCase ) return cls( __UpperCamelCase ,question_encoder_tokenizer=__UpperCamelCase ,generator_tokenizer=__UpperCamelCase ,retrieval_workers=__UpperCamelCase ,index=__UpperCamelCase ,)
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
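# Usage sketch for the benchmark utilities exercised by the tests above. The
# checkpoint id and the [8]/[1] shapes mirror the test fixtures;
# multi_process=False (assumed here) keeps the run in-process. Requires TF.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)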
"""simple docstring""" from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class UpperCamelCase : lowercase = 4_2 # [batch_size x 3] lowercase = 4_2 # [batch_size x 3] lowercase = 4_2 # [batch_size x 3] lowercase = 4_2 # [batch_size x 3] lowercase = 4_2 lowercase = 4_2 lowercase = 4_2 lowercase = 4_2 lowercase = 4_2 def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.floataa ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.floataa ) ) def _UpperCAmelCase ( self ) -> torch.Tensor: '''simple docstring''' lowercase_ : Dict = torch.arange(self.height * self.width ) lowercase_ : str = torch.stack( [ pixel_indices % self.width, torch.div(__UpperCamelCase ,self.width ,rounding_mode='trunc' ), ] ,axis=1 ,) return coords @property def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[Any] = self.shape lowercase_ : List[Any] = int(np.prod(__UpperCamelCase ) ) lowercase_ : Dict = self.get_image_coords() lowercase_ : Tuple = torch.broadcast_to(coords.unsqueeze(0 ) ,[batch_size * inner_batch_size, *coords.shape] ) lowercase_ : List[Any] = self.get_camera_rays(__UpperCamelCase ) lowercase_ : Tuple = rays.view(__UpperCamelCase ,inner_batch_size * self.height * self.width ,2 ,3 ) return rays def _UpperCAmelCase ( self ,__UpperCamelCase ) -> torch.Tensor: '''simple docstring''' lowercase_ : Tuple = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] lowercase_ : Dict = coords.view(__UpperCamelCase ,-1 ,2 ) lowercase_ : int = self.resolution() lowercase_ : List[Any] = self.fov() lowercase_ : Tuple = (flat.float() / (res - 1)) * 2 - 1 lowercase_ : Tuple = fracs * torch.tan(fov / 2 ) lowercase_ : str = fracs.view(__UpperCamelCase ,-1 ,2 ) lowercase_ : Optional[Any] = ( self.z.view(__UpperCamelCase ,1 ,3 ) + self.x.view(__UpperCamelCase ,1 ,3 ) * fracs[:, :, :1] + self.y.view(__UpperCamelCase ,1 ,3 ) * fracs[:, :, 1:] ) lowercase_ : Tuple = directions / directions.norm(dim=-1 ,keepdim=__UpperCamelCase ) lowercase_ : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(__UpperCamelCase ,1 ,3 ) ,[batch_size, directions.shape[1], 3] ), directions, ] ,dim=2 ,) return rays.view(__UpperCamelCase ,*__UpperCamelCase ,2 ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> "DifferentiableProjectiveCamera": '''simple docstring''' assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=__UpperCamelCase ,height=__UpperCamelCase ,x_fov=self.x_fov ,y_fov=self.y_fov ,) def lowercase__( __SCREAMING_SNAKE_CASE : int ): """simple docstring""" lowercase_ : Optional[Any] = [] lowercase_ : Dict = [] lowercase_ : Tuple = [] lowercase_ : List[Any] = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): lowercase_ : Tuple = np.array([np.sin(__SCREAMING_SNAKE_CASE ), np.cos(__SCREAMING_SNAKE_CASE ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) lowercase_ : Optional[int] = -z * 4 lowercase_ : List[str] = np.array([np.cos(__SCREAMING_SNAKE_CASE ), -np.sin(__SCREAMING_SNAKE_CASE ), 0.0] ) lowercase_ : Tuple = np.cross(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) origins.append(__SCREAMING_SNAKE_CASE ) xs.append(__SCREAMING_SNAKE_CASE ) ys.append(__SCREAMING_SNAKE_CASE ) zs.append(__SCREAMING_SNAKE_CASE ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__SCREAMING_SNAKE_CASE , axis=0 ) ).float() , width=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__SCREAMING_SNAKE_CASE )) , )
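# Hedged usage sketch for the camera container above. The obfuscated
# identifiers stand in for the upstream shap-e helpers in diffusers;
# `create_pan_cameras` and the `camera_rays` property are those upstream
# names and are assumptions here, not names defined in this file.
from diffusers.pipelines.shap_e.camera import create_pan_cameras

cameras = create_pan_cameras(size=64)  # 20 poses circling the origin
rays = cameras.camera_rays             # per-pixel (origin, direction) pairs
print(rays.shape)                      # torch.Size([1, 81920, 2, 3]) for size=64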
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
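# Minimal sketch of the feature extractor above (EncodecFeatureExtractor
# upstream in transformers). The one-second 440 Hz sine input is
# illustrative only.
import numpy as np
from transformers import EncodecFeatureExtractor

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
raw_audio = np.sin(2 * np.pi * 440 * np.arange(24000) / 24000).astype(np.float32)
inputs = feature_extractor(raw_audio, sampling_rate=24000, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([1, 1, 24000]) -> (batch, channels, samples)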
"""simple docstring""" import qiskit def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register lowercase_ : Tuple = qiskit.QuantumCircuit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator lowercase_ : int = qiskit.execute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shots=10_00 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =single_qubit_measure(2, 2) print(F"Total count for various states are: {counts}")
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=False ): try: lowercase_ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase_ : List[Any] = default else: # KEY is set, convert it to True or False. try: lowercase_ : Dict = strtobool(__SCREAMING_SNAKE_CASE ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value __SCREAMING_SNAKE_CASE =parse_flag_from_env("RUN_SLOW", default=False) def lowercase__( __SCREAMING_SNAKE_CASE : int ): return unittest.skip('Test was skipped' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str ): return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int ): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str ): return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(is_deepspeed_available() , 'test requires 
DeepSpeed' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None ): if test_case is None: return partial(__SCREAMING_SNAKE_CASE , version=__SCREAMING_SNAKE_CASE ) return unittest.skipUnless(is_torch_version('>=' , __SCREAMING_SNAKE_CASE ) , F'''test requires torch version >= {version}''' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE =( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__SCREAMING_SNAKE_CASE ) class UpperCamelCase ( unittest.TestCase ): lowercase = True @classmethod def _UpperCAmelCase ( cls ) -> int: '''simple docstring''' lowercase_ : Optional[int] = tempfile.mkdtemp() @classmethod def _UpperCAmelCase ( cls ) -> int: '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__UpperCamelCase ) class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = mocks if isinstance(__UpperCamelCase ,(tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : int = AcceleratorState() lowercase_ : int = tensor[None].clone().to(state.device ) lowercase_ : List[str] = gather(__SCREAMING_SNAKE_CASE ).cpu() lowercase_ : str = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __SCREAMING_SNAKE_CASE ): return False return True class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = returncode lowercase_ : Optional[Any] = stdout lowercase_ : int = stderr async def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ): while True: lowercase_ : Dict = await stream.readline() if line: callback(__SCREAMING_SNAKE_CASE ) else: break async def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False ): if echo: print('\nRunning: ' , ' '.join(__SCREAMING_SNAKE_CASE ) ) lowercase_ : int = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__SCREAMING_SNAKE_CASE , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase_ : Any = [] lowercase_ : str = [] def tee(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int="" ): lowercase_ : Optional[Any] = line.decode('utf-8' ).rstrip() sink.append(__SCREAMING_SNAKE_CASE ) if not quiet: print(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , file=__SCREAMING_SNAKE_CASE ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __SCREAMING_SNAKE_CASE : tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __SCREAMING_SNAKE_CASE : tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ) ), ] , timeout=__SCREAMING_SNAKE_CASE , ) return _RunOutput(await p.wait() , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=1_80 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=True ): lowercase_ : Optional[int] = asyncio.get_event_loop() lowercase_ : List[Any] = loop.run_until_complete( _stream_subprocess(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , stdin=__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , quiet=__SCREAMING_SNAKE_CASE , echo=__SCREAMING_SNAKE_CASE ) ) lowercase_ : Optional[Any] = ' '.join(__SCREAMING_SNAKE_CASE ) if result.returncode > 0: lowercase_ : int = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) return result class UpperCamelCase ( lowercase_ ): pass def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): try: lowercase_ : Optional[int] = subprocess.check_output(__SCREAMING_SNAKE_CASE , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__SCREAMING_SNAKE_CASE , 'decode' ): lowercase_ : Any = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F'''Command `{" ".join(__SCREAMING_SNAKE_CASE )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
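# Hedged sketch of how these helpers compose in a test file. Upstream they
# live in `accelerate.test_utils.testing`; that import path and the
# decorator/function names are the upstream ones, not the obfuscated names
# above.
import unittest
from accelerate.test_utils.testing import require_cuda, run_command


class ExampleTest(unittest.TestCase):
    @require_cuda  # skipped automatically when no GPU is present
    def test_subprocess_runs(self):
        output = run_command(["python", "-c", "print('ok')"], return_stdout=True)
        self.assertIn("ok", output)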
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
"""simple docstring""" import math def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : List[str] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : float = 1 / 1_23_45 ): lowercase_ : Optional[Any] = 0 lowercase_ : Optional[int] = 0 lowercase_ : List[Any] = 3 while True: lowercase_ : Dict = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(__SCREAMING_SNAKE_CASE ): lowercase_ : Any = int(__SCREAMING_SNAKE_CASE ) total_partitions += 1 if check_partition_perfect(__SCREAMING_SNAKE_CASE ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(__SCREAMING_SNAKE_CASE ) integer += 1 if __name__ == "__main__": print(F"{solution() = }")
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): def __init__( self ,**__UpperCamelCase ) -> int: '''simple docstring''' requires_backends(self ,['bs4'] ) super().__init__(**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = [] lowercase_ : int = [] lowercase_ : List[Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag lowercase_ : Optional[int] = parent.find_all(child.name ,recursive=__UpperCamelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__UpperCamelCase ) else next(i for i, s in enumerate(__UpperCamelCase ,1 ) if s is child ) ) lowercase_ : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = BeautifulSoup(__UpperCamelCase ,'html.parser' ) lowercase_ : List[Any] = [] lowercase_ : List[str] = [] lowercase_ : Optional[Any] = [] for element in html_code.descendants: if type(__UpperCamelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue lowercase_ : Dict = html.unescape(__UpperCamelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(__UpperCamelCase ) lowercase_ : str = self.xpath_soup(__UpperCamelCase ) stringaxtag_seq.append(__UpperCamelCase ) stringaxsubs_seq.append(__UpperCamelCase ) if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError('Number of doc strings and xtags does not correspond' ) if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError('Number of doc strings and xsubs does not correspond' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = '' for tagname, subs in zip(__UpperCamelCase ,__UpperCamelCase ): xpath += f'''/{tagname}''' if subs != 0: xpath += f'''[{subs}]''' return xpath def __call__( self ,__UpperCamelCase ) -> BatchFeature: '''simple docstring''' lowercase_ : Optional[int] = False # Check that strings has a valid type if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Optional[Any] = True elif isinstance(__UpperCamelCase ,(list, tuple) ): if len(__UpperCamelCase ) == 0 or isinstance(html_strings[0] ,__UpperCamelCase ): lowercase_ : Tuple = True if not valid_strings: raise ValueError( 'HTML strings must of type `str`, `List[str]` (batch of examples), ' f'''but is of type {type(__UpperCamelCase )}.''' ) lowercase_ : Optional[Any] = bool(isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(html_strings[0] ,__UpperCamelCase )) ) if not is_batched: lowercase_ : Any = [html_strings] # Get nodes + xpaths lowercase_ : int = [] lowercase_ : Tuple = [] for html_string in html_strings: lowercase_ : Optional[int] = self.get_three_from_single(__UpperCamelCase ) nodes.append(__UpperCamelCase ) lowercase_ : Optional[int] = [] for node, tag_list, sub_list in zip(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Tuple = self.construct_xpath(__UpperCamelCase ,__UpperCamelCase ) xpath_strings.append(__UpperCamelCase ) 
xpaths.append(__UpperCamelCase ) # return as Dict lowercase_ : Dict = {'nodes': nodes, 'xpaths': xpaths} lowercase_ : int = BatchFeature(data=__UpperCamelCase ,tensor_type=__UpperCamelCase ) return encoded_inputs
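# Minimal sketch of the extractor above (MarkupLMFeatureExtractor upstream;
# it needs `beautifulsoup4` installed for the bs4 backend).
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
encoding = feature_extractor(html)
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]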
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt", "YituTech/conv-bert-medium-small": ( "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt" ), "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt", } } __SCREAMING_SNAKE_CASE ={ "YituTech/conv-bert-base": 512, "YituTech/conv-bert-medium-small": 512, "YituTech/conv-bert-small": 512, } __SCREAMING_SNAKE_CASE ={ "YituTech/conv-bert-base": {"do_lower_case": True}, "YituTech/conv-bert-medium-small": {"do_lower_case": True}, "YituTech/conv-bert-small": {"do_lower_case": True}, } class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ConvBertTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> str: '''simple docstring''' super().__init__( __UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars ): lowercase_ : str = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) ) lowercase_ : Union[str, Any] = do_lower_case lowercase_ : List[str] = strip_accents lowercase_ : List[str] = tokenize_chinese_chars lowercase_ : str = normalizer_class(**__UpperCamelCase ) lowercase_ : List[str] = do_lower_case def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : List[str] = [self.sep_token_id] lowercase_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase )
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class UpperCamelCase ( lowercase_ ): lowercase = 'megatron-bert' def __init__( self ,__UpperCamelCase=2_9056 ,__UpperCamelCase=1024 ,__UpperCamelCase=24 ,__UpperCamelCase=16 ,__UpperCamelCase=4096 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0 ,__UpperCamelCase="absolute" ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> List[str]: '''simple docstring''' super().__init__(pad_token_id=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = vocab_size lowercase_ : Optional[Any] = hidden_size lowercase_ : Tuple = num_hidden_layers lowercase_ : List[str] = num_attention_heads lowercase_ : Union[str, Any] = hidden_act lowercase_ : Tuple = intermediate_size lowercase_ : Any = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : List[Any] = max_position_embeddings lowercase_ : Optional[Any] = type_vocab_size lowercase_ : Any = initializer_range lowercase_ : str = layer_norm_eps lowercase_ : Optional[Any] = position_embedding_type lowercase_ : Tuple = use_cache
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
"""simple docstring""" import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]: lowercase_ : Tuple = multiprocessing.Manager() lowercase_ : str = manager.list() lowercase_ : List[str] = multiprocessing.Process(target=__SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ) -> List[Any]: with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowercase_ : List[Any] = shutil.rmtree lowercase_ : Optional[Any] = os.rmdir lowercase_ : Tuple = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowercase_ : Union[str, Any] = {} with swallow_io(): with time_limit(__SCREAMING_SNAKE_CASE ): exec(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F'''failed: {e}''' ) # Needed for cleaning up. lowercase_ : List[str] = rmtree lowercase_ : List[str] = rmdir lowercase_ : Dict = chdir @contextlib.contextmanager def lowercase__( __SCREAMING_SNAKE_CASE : Any ) -> List[Any]: def signal_handler(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , __SCREAMING_SNAKE_CASE ) signal.signal(signal.SIGALRM , __SCREAMING_SNAKE_CASE ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def lowercase__( ) -> Any: lowercase_ : Tuple = WriteOnlyStringIO() with contextlib.redirect_stdout(__SCREAMING_SNAKE_CASE ): with contextlib.redirect_stderr(__SCREAMING_SNAKE_CASE ): with redirect_stdin(__SCREAMING_SNAKE_CASE ): yield @contextlib.contextmanager def lowercase__( ) -> Tuple: with tempfile.TemporaryDirectory() as dirname: with chdir(__SCREAMING_SNAKE_CASE ): yield dirname class UpperCamelCase ( lowercase_ ): pass class UpperCamelCase ( io.StringIO ): def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' raise OSError def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int: '''simple docstring''' raise OSError def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' raise OSError def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return False class UpperCamelCase ( contextlib._RedirectStream ): # type: ignore lowercase = 'stdin' @contextlib.contextmanager def lowercase__( __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: if root == ".": yield return lowercase_ : Any = os.getcwd() os.chdir(__SCREAMING_SNAKE_CASE ) try: yield except BaseException as exc: raise exc finally: os.chdir(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int=None ) -> List[Any]: if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins lowercase_ : List[Any] = None lowercase_ : str = None import os lowercase_ : Any = '1' lowercase_ : Optional[int] = None lowercase_ : Dict = None lowercase_ : Union[str, Any] = None lowercase_ : List[Any] = None lowercase_ : Optional[int] = None lowercase_ : Union[str, Any] = None lowercase_ : Union[str, Any] = None lowercase_ : Optional[Any] = None lowercase_ : List[Any] = None lowercase_ : List[Any] = None lowercase_ : Union[str, Any] = None lowercase_ : Tuple = None lowercase_ : List[Any] = None lowercase_ : int = None lowercase_ : Optional[Any] = None lowercase_ : Any = None lowercase_ : List[Any] = None lowercase_ : List[str] = None lowercase_ : int = None lowercase_ : Tuple = None lowercase_ : Optional[int] = None lowercase_ : List[str] = None lowercase_ : Any = None lowercase_ : Dict = None lowercase_ : Optional[int] = None lowercase_ : Union[str, Any] = None lowercase_ : Optional[int] = None import shutil lowercase_ : Optional[int] = None lowercase_ : int = None lowercase_ : Optional[int] = None import subprocess lowercase_ : Optional[int] = None # type: ignore lowercase_ : Dict = None import sys lowercase_ : List[Any] = None lowercase_ : Tuple = None lowercase_ : Union[str, Any] = None lowercase_ : Optional[int] = None lowercase_ : List[Any] = None
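# Hedged sketch of the entry point above, written against the upstream name
# and argument order from the `code_eval` metric (`check_correctness`); both
# are assumptions given the obfuscated identifiers in this file.
result = check_correctness(
    check_program="def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n",
    timeout=3.0,
    task_id="demo/0",
    completion_id=0,
)
print(result["passed"])  # True when the program runs cleanly in the sandbox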
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
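Two behaviours of this tokenizer are easy to miss in the class body: single sequences receive only a trailing EOS (no BOS prefix), and user turns are space-prefixed before a conversation is flattened. A minimal sketch of both, with eos_token_id=2 assumed purely for illustration:

def build_inputs_with_special_tokens(token_ids, eos_token_id=2):
    # single sequences get only a trailing EOS; there is no BOS prefix
    return token_ids + [eos_token_id]


def flatten_conversation(turns):
    # turns: (is_user, text) pairs; user turns get the space prefix the model expects
    parts = [" " + text if is_user else text for is_user, text in turns]
    return " ".join(parts)


print(build_inputs_with_special_tokens([5, 6, 7]))             # [5, 6, 7, 2]
print(flatten_conversation([(True, "hello"), (False, "hi!")]))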
321
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[Any] = XLMRobertaModel.from_pretrained('xlm-roberta-base' ) lowercase_ : Dict = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house lowercase_ : Dict = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim lowercase_ : List[str] = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): lowercase_ : Union[str, Any] = model(__UpperCamelCase )['last_hidden_state'].detach() self.assertEqual(output.shape ,__UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCamelCase ,atol=1e-3 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = XLMRobertaModel.from_pretrained('xlm-roberta-large' ) lowercase_ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house lowercase_ : Dict = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim lowercase_ : Tuple = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): lowercase_ : List[Any] = model(__UpperCamelCase )['last_hidden_state'].detach() self.assertEqual(output.shape ,__UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCamelCase ,atol=1e-3 ) )
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
0
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( lowercase_ ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Optional[Any] = model(__UpperCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , 
lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( lowercase_ ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape 
,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
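The past-key-values check above verifies one invariant: decoding incrementally from cached state must match a single full-sequence pass. A toy recurrent model demonstrates the same invariant without loading a transformer (the GRU is an illustrative stand-in, not the tested model):

import torch
import torch.nn as nn

torch.manual_seed(0)
rnn = nn.GRU(input_size=8, hidden_size=8, batch_first=True).eval()
x = torch.randn(2, 5, 8)

with torch.no_grad():
    full, _ = rnn(x)                      # one pass over the whole sequence
    prefix_out, state = rnn(x[:, :3])     # run the prefix, keep the hidden state
    suffix_out, _ = rnn(x[:, 3:], state)  # continue from the cached state

# the cached continuation must match the tail of the full pass
assert torch.allclose(full[:, 3:], suffix_out, atol=1e-5)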
361
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
0
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: the weight row closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the two squared distances must stay in separate accumulators
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull every component of the winning weight vector towards the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
362
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
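decrease_key above overwrites a node's value and restores the heap invariant by sifting the entry up until its parent is no larger. The same operation on a bare array, checked against heapq (helper names are illustrative):

import heapq


def sift_up(heap, idx):
    while idx > 0:
        parent = (idx - 1) // 2
        if heap[parent] <= heap[idx]:
            break                      # parent is small enough: invariant holds
        heap[parent], heap[idx] = heap[idx], heap[parent]
        idx = parent


def decrease_key(heap, idx, new_value):
    assert new_value < heap[idx], "new value must be less than current value"
    heap[idx] = new_value
    sift_up(heap, idx)


heap = [6, 3, -1, 1, 4]
heapq.heapify(heap)
decrease_key(heap, heap.index(6), -17)
assert heap[0] == -17                  # the decreased key bubbled to the root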
321
0
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( lowercase_ ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for 
model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
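The position-id tests above pin down RoBERTa-style padding-aware positions: non-pad tokens count up from padding_idx + 1 while pad positions stay at padding_idx. A standalone version of that computation, reproducing the expected tensor from the test:

import torch


def create_position_ids_from_input_ids(input_ids, padding_idx):
    # non-pad tokens count up from padding_idx + 1; pad positions stay at padding_idx
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


ids = torch.tensor([[12, 31, 13, 1]])   # last token is the pad (padding_idx == 1)
print(create_position_ids_from_input_ids(ids, padding_idx=1))
# -> tensor([[2, 3, 4, 1]]), matching expected_positions in the test above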
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
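The processor under test routes text through the tokenizer, images through the image processor, and merges the resulting dicts, raising when neither input is given. A sketch of that dispatch with a hypothetical MiniProcessor (not the real CLIPSegProcessor):

class MiniProcessor:
    # hypothetical stand-in for the tokenizer + image-processor pairing tested above
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding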
321
0
"""simple docstring""" import argparse import struct import unittest class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Tuple = data # Initialize hash values lowercase_ : Optional[int] = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants lowercase_ : List[Any] = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] lowercase_ : List[str] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _UpperCAmelCase ( __UpperCamelCase ) -> bytes: '''simple docstring''' lowercase_ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(__UpperCamelCase ) + 8) % 64)) lowercase_ : Optional[Any] = struct.pack('>Q' ,(len(__UpperCamelCase ) * 8) ) return data + padding + big_endian_integer def _UpperCAmelCase ( self ) -> None: '''simple docstring''' lowercase_ : Tuple = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowercase_ : Dict = list(struct.unpack('>16L' ,__UpperCamelCase ) ) # add 48 0-ed integers words += [0] * 48 lowercase_ : List[str] = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array lowercase_ : Tuple = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) lowercase_ : List[str] = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) lowercase_ : Dict = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression lowercase_ : Tuple = self.ror(__UpperCamelCase ,6 ) ^ self.ror(__UpperCamelCase ,11 ) ^ self.ror(__UpperCamelCase ,25 ) lowercase_ : int = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) lowercase_ : str = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 lowercase_ : Any = self.ror(__UpperCamelCase ,2 ) ^ self.ror(__UpperCamelCase ,13 ) ^ self.ror(__UpperCamelCase ,22 ) lowercase_ : Optional[int] = (a & b) ^ (a & c) ^ (b & c) lowercase_ : Any = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 lowercase_ : Optional[Any] = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) lowercase_ : Dict = [a, 
b, c, d, e, f, g, h] # Modify final values lowercase_ : List[str] = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] lowercase_ : Optional[int] = ''.join([hex(__UpperCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> None: '''simple docstring''' import hashlib lowercase_ : str = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(__UpperCamelCase ).hash ,hashlib.shaaaa(__UpperCamelCase ).hexdigest() ) def lowercase__( ): import doctest doctest.testmod() lowercase_ : Tuple = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) lowercase_ : Union[str, Any] = parser.parse_args() lowercase_ : List[Any] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: lowercase_ : Union[str, Any] = f.read() else: lowercase_ : Optional[Any] = bytes(__SCREAMING_SNAKE_CASE , 'utf-8' ) print(SHAaaa(__SCREAMING_SNAKE_CASE ).hash ) if __name__ == "__main__": main()
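The rotation helper above is a 32-bit right rotation, and the unit test validates the whole digest against hashlib. Both pieces in standalone form:

import hashlib


def ror32(value, rotations):
    # rotate a 32-bit word right; the mask keeps the result in 32 bits
    return 0xFFFFFFFF & ((value >> rotations) | (value << (32 - rotations)))


assert ror32(0x00000001, 1) == 0x80000000
# reference digest the unit test compares a from-scratch implementation against:
print(hashlib.sha256(b"Test String").hexdigest())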
364
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
0
"""simple docstring""" import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) __SCREAMING_SNAKE_CASE ="bert-base-cased" __SCREAMING_SNAKE_CASE ="fp16" __SCREAMING_SNAKE_CASE ="bf16" __SCREAMING_SNAKE_CASE =[FPaa, BFaa] @require_fsdp @require_cuda class __lowerCamelCase ( lowercase_ ): def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().setUp() lowercase_ : str = dict( ACCELERATE_USE_FSDP='true' ,MASTER_ADDR='localhost' ,MASTER_PORT='10999' ,RANK='0' ,LOCAL_RANK='0' ,WORLD_SIZE='1' ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__UpperCamelCase ): lowercase_ : Dict = self.dist_env.copy() lowercase_ : Union[str, Any] = f'''{i + 1}''' lowercase_ : int = strategy with mockenv_context(**__UpperCamelCase ): lowercase_ : int = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__UpperCamelCase ): lowercase_ : Optional[Any] = self.dist_env.copy() lowercase_ : List[str] = prefetch_policy with mockenv_context(**__UpperCamelCase ): lowercase_ : List[Any] = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__UpperCamelCase ): lowercase_ : Any = self.dist_env.copy() lowercase_ : List[Any] = state_dict_type with mockenv_context(**__UpperCamelCase ): lowercase_ : Union[str, Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Dict = AutoModel.from_pretrained(__UpperCamelCase ) for policy in FSDP_AUTO_WRAP_POLICY: lowercase_ : List[str] = self.dist_env.copy() lowercase_ : List[str] = policy if policy == "TRANSFORMER_BASED_WRAP": lowercase_ : str = 'BertLayer' elif policy == "SIZE_BASED_WRAP": lowercase_ : List[str] = '2000' with mockenv_context(**__UpperCamelCase ): lowercase_ : Dict = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__UpperCamelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) lowercase_ : Optional[int] = self.dist_env.copy() 
lowercase_ : Optional[int] = 'TRANSFORMER_BASED_WRAP' lowercase_ : Any = 'T5Layer' with mockenv_context(**__UpperCamelCase ): lowercase_ : Optional[int] = FullyShardedDataParallelPlugin() with self.assertRaises(__UpperCamelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__UpperCamelCase ) self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) ) lowercase_ : Optional[int] = self.dist_env.copy() lowercase_ : str = 'SIZE_BASED_WRAP' lowercase_ : Union[str, Any] = '0' with mockenv_context(**__UpperCamelCase ): lowercase_ : Union[str, Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__UpperCamelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: lowercase_ : List[Any] = self.dist_env.copy() lowercase_ : Optional[Any] = mp_dtype with mockenv_context(**__UpperCamelCase ): lowercase_ : Any = Accelerator() if mp_dtype == "fp16": lowercase_ : int = torch.floataa elif mp_dtype == "bf16": lowercase_ : str = torch.bfloataa lowercase_ : Optional[Any] = MixedPrecision(param_dtype=__UpperCamelCase ,reduce_dtype=__UpperCamelCase ,buffer_dtype=__UpperCamelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__UpperCamelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,__UpperCamelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: lowercase_ : List[str] = self.dist_env.copy() lowercase_ : Optional[Any] = str(__UpperCamelCase ).lower() with mockenv_context(**__UpperCamelCase ): lowercase_ : Optional[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__UpperCamelCase ) ) @require_fsdp @require_multi_gpu @slow class __lowerCamelCase ( lowercase_ ): def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().setUp() lowercase_ : Union[str, Any] = 0.82 lowercase_ : List[str] = [ 'fsdp_shard_grad_op_transformer_based_wrap', 'fsdp_full_shard_transformer_based_wrap', ] lowercase_ : str = { 'multi_gpu_fp16': 3200, 'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000, 'fsdp_full_shard_transformer_based_wrap_fp16': 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } lowercase_ : Dict = 160 lowercase_ : Optional[Any] = 160 lowercase_ : str = inspect.getfile(accelerate.test_utils ) lowercase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[str] = os.path.join(self.test_scripts_folder ,'test_performance.py' ) lowercase_ : Optional[int] = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp'] for config in self.performance_configs: lowercase_ : str = cmd.copy() for i, strategy in enumerate(__UpperCamelCase ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append('--mixed_precision=no' ) else: cmd_config.append('--mixed_precision=fp16' ) if "cpu_offload" in config: cmd_config.append('--fsdp_offload_params=True' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('--fsdp_min_num_params=2000' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Dict = os.path.join(self.test_scripts_folder ,'test_checkpointing.py' ) lowercase_ : Optional[int] = [ 'accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp', '--mixed_precision=fp16', '--fsdp_transformer_layer_cls_to_wrap=BertLayer', ] for i, strategy in enumerate(__UpperCamelCase ): lowercase_ : Any = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue lowercase_ : int = len(__UpperCamelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: lowercase_ : int = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', '--partial_train_epoch=1', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() ) lowercase_ : int = cmd_config[:-1] lowercase_ : int = os.path.join(self.tmpdir ,'epoch_0' ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = os.path.join(self.test_scripts_folder ,'test_peak_memory_usage.py' ) lowercase_ : Dict = [ 'accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): lowercase_ : List[Any] = cmd.copy() if "fp16" in spec: cmd_config.extend(['--mixed_precision=fp16'] ) else: cmd_config.extend(['--mixed_precision=no'] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['--use_fsdp'] ) for i, strategy in enumerate(__UpperCamelCase ): if strategy.lower() in spec: 
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append('--fsdp_offload_params=True' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('--fsdp_min_num_params=2000' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCamelCase ,env=os.environ.copy() )
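The plugin-from-environment pattern these tests exercise can be reproduced standalone; a minimal sketch (the FSDP_SHARDING_STRATEGY key is an assumption about the env variable accelerate reads; the value "1" maps to FULL_SHARD):

# minimal sketch: FullyShardedDataParallelPlugin picks up its settings from
# environment variables inside a mocked single-process distributed environment
# (FSDP_SHARDING_STRATEGY is an assumed env key; "1" maps to FULL_SHARD)
from transformers.testing_utils import mockenv_context
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin

env = dict(
    ACCELERATE_USE_FSDP="true",
    MASTER_ADDR="localhost",
    MASTER_PORT="10999",
    RANK="0",
    LOCAL_RANK="0",
    WORLD_SIZE="1",
    FSDP_SHARDING_STRATEGY="1",
)
with mockenv_context(**env):
    plugin = FullyShardedDataParallelPlugin()
    print(plugin.sharding_strategy)  # expected: ShardingStrategy.FULL_SHARD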
365
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
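The past-key-values test above boils down to one property: a single cached decoding step must match re-running the full extended sequence. A condensed sketch with a tiny illustrative config:

# minimal sketch of the cache-equivalence check (tiny illustrative config)
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37, is_decoder=True,
)
model = BertGenerationDecoder(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 5))
out = model(input_ids, use_cache=True)
next_token = torch.randint(0, config.vocab_size, (1, 1))

# one cached step vs. a full forward pass over the extended sequence
cached = model(next_token, past_key_values=out.past_key_values).logits
full = model(torch.cat([input_ids, next_token], dim=-1)).logits[:, -1:]
assert torch.allclose(cached, full, atol=1e-3)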
321
0
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "spiece.model"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } __SCREAMING_SNAKE_CASE ={ "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) __SCREAMING_SNAKE_CASE =0 __SCREAMING_SNAKE_CASE =1 __SCREAMING_SNAKE_CASE =2 __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =4 class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = 'left' def __init__( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<sep>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<cls>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=["<eop>", "<eod>"] ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : List[str] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token lowercase_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCamelCase ,remove_space=__UpperCamelCase ,keep_accents=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,additional_special_tokens=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,) lowercase_ : List[Any] = 3 lowercase_ : Optional[int] = do_lower_case lowercase_ : Optional[int] = remove_space lowercase_ : Any = keep_accents lowercase_ : Optional[Any] = vocab_file lowercase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCamelCase ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.sp_model ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[str] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: '''simple docstring''' lowercase_ : Optional[Any] = self.__dict__.copy() lowercase_ : Optional[Any] = None return state def __setstate__( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): lowercase_ : Any = {} lowercase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' if self.remove_space: lowercase_ : Dict = ' '.join(inputs.strip().split() ) else: lowercase_ : int = inputs lowercase_ : Optional[Any] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' ) if not self.keep_accents: lowercase_ : List[str] = unicodedata.normalize('NFKD' ,__UpperCamelCase ) 
lowercase_ : Any = ''.join([c for c in outputs if not unicodedata.combining(__UpperCamelCase )] ) if self.do_lower_case: lowercase_ : Tuple = outputs.lower() return outputs def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = self.preprocess_text(__UpperCamelCase ) lowercase_ : Dict = self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase ) lowercase_ : List[str] = [] for piece in pieces: if len(__UpperCamelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): lowercase_ : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCamelCase ,'' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase_ : Dict = cur_pieces[1:] else: lowercase_ : List[str] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCamelCase ) else: new_pieces.append(__UpperCamelCase ) return new_pieces def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.sp_model.PieceToId(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' return self.sp_model.IdToPiece(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip() return out_string def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = True ,**__UpperCamelCase ,) -> str: '''simple docstring''' lowercase_ : List[Any] = kwargs.pop('use_source_tokenizer' ,__UpperCamelCase ) lowercase_ : str = self.convert_ids_to_tokens(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 lowercase_ : Tuple = [] lowercase_ : Tuple = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) ) lowercase_ : List[str] = [] sub_texts.append(__UpperCamelCase ) else: current_sub_text.append(__UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowercase_ : int = ''.join(__UpperCamelCase ) lowercase_ : Dict = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowercase_ : Optional[int] = self.clean_up_tokenization(__UpperCamelCase ) return clean_text else: return text def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : str = [self.sep_token_id] lowercase_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase ) if token_ids_a is not None: return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1, 1] return ([0] * len(__UpperCamelCase )) + [1, 1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Any = [self.sep_token_id] lowercase_ : Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : Tuple = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase ,'wb' ) as fi: lowercase_ : Tuple = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
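The sequence layout built by build_inputs_with_special_tokens above appends the separators at the end (single sequence: A <sep> <cls>; pair: A <sep> B <sep> <cls>), unlike BERT's leading [CLS]. A short illustrative check:

# illustrative check of the XLNet special-token layout built above
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tok.build_inputs_with_special_tokens([1, 2, 3])
# token ids come back as A + <sep> + <cls>
assert ids == [1, 2, 3, tok.sep_token_id, tok.cls_token_id]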
366
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
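The helpers under test can also be driven directly; a minimal sketch of the legacy convert_graph_to_onnx path (the output location is illustrative; opset 12 matches the tests):

# minimal sketch of the conversion path exercised by the tests above
from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

onnx_path = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)
quantized_path = quantize(onnx_path)  # writes a sibling *-quantized.onnx file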
321
0
"""simple docstring""" from __future__ import annotations def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> Tuple: if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): lowercase_ : Dict = array[indexa], array[indexa] def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> str: if length > 1: lowercase_ : List[str] = int(length / 2 ) for i in range(__SCREAMING_SNAKE_CASE , low + middle ): comp_and_swap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + middle , __SCREAMING_SNAKE_CASE ) bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) bitonic_merge(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> List[str]: if length > 1: lowercase_ : Optional[Any] = int(length / 2 ) bitonic_sort(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 ) bitonic_sort(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , 0 ) bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip() __SCREAMING_SNAKE_CASE =[int(item.strip()) for item in user_input.split(",")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("\nSorted array in ascending order is: ", end="") print(*unsorted, sep=", ") bitonic_merge(unsorted, 0, len(unsorted), 0) print("Sorted array in descending order is: ", end="") print(*unsorted, sep=", ")
367
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
0
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
368
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
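The __main__ block above wires the three required arguments into convert_checkpoint_to_pytorch; a sketch of calling it directly (all paths are placeholders):

# illustrative direct call of the conversion entry point defined above
# (all three paths are placeholders)
convert_checkpoint_to_pytorch(
    "/path/to/tf_checkpoint",     # --tf_checkpoint_path
    "/path/to/bert_config.json",  # --bert_config_file
    "/path/to/pytorch_dump",      # --pytorch_dump_path
)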
321
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", } class UpperCamelCase ( lowercase_ ): lowercase = 'falcon' lowercase = ['past_key_values'] def __init__( self ,__UpperCamelCase=6_5024 ,__UpperCamelCase=4544 ,__UpperCamelCase=32 ,__UpperCamelCase=71 ,__UpperCamelCase=1e-5 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=None ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=11 ,__UpperCamelCase=11 ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = vocab_size # Backward compatibility with n_embed kwarg lowercase_ : Union[str, Any] = kwargs.pop('n_embed' ,__UpperCamelCase ) lowercase_ : Optional[Any] = hidden_size if n_embed is None else n_embed lowercase_ : Tuple = num_hidden_layers lowercase_ : Any = num_attention_heads lowercase_ : str = layer_norm_epsilon lowercase_ : Any = initializer_range lowercase_ : List[str] = use_cache lowercase_ : Any = hidden_dropout lowercase_ : Optional[Any] = attention_dropout lowercase_ : Any = bos_token_id lowercase_ : Optional[Any] = eos_token_id lowercase_ : Optional[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads lowercase_ : str = alibi lowercase_ : Tuple = new_decoder_architecture lowercase_ : Dict = multi_query # Ignored when new_decoder_architecture is True lowercase_ : Optional[int] = parallel_attn lowercase_ : Union[str, Any] = bias super().__init__(bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,**__UpperCamelCase ) @property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return self.hidden_size // self.num_attention_heads @property def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return not self.alibi
369
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
0
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) __SCREAMING_SNAKE_CASE =_symbol_database.Default() __SCREAMING_SNAKE_CASE =_descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) __SCREAMING_SNAKE_CASE =globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: __SCREAMING_SNAKE_CASE =None __SCREAMING_SNAKE_CASE =B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" __SCREAMING_SNAKE_CASE =45 __SCREAMING_SNAKE_CASE =1581 __SCREAMING_SNAKE_CASE =1517 __SCREAMING_SNAKE_CASE =1570 __SCREAMING_SNAKE_CASE =1584 __SCREAMING_SNAKE_CASE =1793 __SCREAMING_SNAKE_CASE =1795 __SCREAMING_SNAKE_CASE =1916 __SCREAMING_SNAKE_CASE =1864 __SCREAMING_SNAKE_CASE =1905 __SCREAMING_SNAKE_CASE =1919 __SCREAMING_SNAKE_CASE =2429 __SCREAMING_SNAKE_CASE =2208 __SCREAMING_SNAKE_CASE =2418 __SCREAMING_SNAKE_CASE =2323 __SCREAMING_SNAKE_CASE =2407 # @@protoc_insertion_point(module_scope)
370
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
0
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=32 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=[10, 20, 30, 40] ,__UpperCamelCase=[2, 2, 3, 2] ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase=["stage2", "stage3", "stage4"] ,__UpperCamelCase=3 ,__UpperCamelCase=None ,) -> Any: '''simple docstring''' lowercase_ : List[Any] = parent lowercase_ : List[str] = batch_size lowercase_ : int = image_size lowercase_ : List[str] = num_channels lowercase_ : int = num_stages lowercase_ : str = hidden_sizes lowercase_ : List[Any] = depths lowercase_ : List[str] = is_training lowercase_ : Dict = use_labels lowercase_ : Optional[Any] = intermediate_size lowercase_ : int = hidden_act lowercase_ : Any = type_sequence_label_size lowercase_ : List[str] = initializer_range lowercase_ : List[str] = out_features lowercase_ : Union[str, Any] = num_labels lowercase_ : Optional[Any] = scope lowercase_ : Optional[int] = num_stages def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Dict = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels ,num_stages=self.num_stages ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,is_training=self.is_training ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,out_features=self.out_features ,) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() ,hidden_size=512 ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=__UpperCamelCase ,auxiliary_loss_weight=0.4 ,auxiliary_in_channels=40 ,auxiliary_channels=256 ,auxiliary_num_convs=1 ,auxiliary_concat_input=__UpperCamelCase ,loss_ignore_index=255 ,num_labels=self.num_labels ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = UperNetForSemanticSegmentation(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[str] = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = 
self.prepare_config_and_inputs() ( lowercase_ ) : str = config_and_inputs lowercase_ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Tuple = UperNetModelTester(self ) lowercase_ : Dict = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Any = model_class(__UpperCamelCase ) lowercase_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : str = [*signature.parameters.keys()] lowercase_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Any = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : int = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Tuple = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) ,expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Tuple = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : str = _config_zero_init(__UpperCamelCase ) lowercase_ : Optional[int] = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowercase_ : Dict = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @unittest.skip(reason='UperNet does not have tied weights' ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase__( ): lowercase_ : List[Any] = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) lowercase_ : str = Image.open(__SCREAMING_SNAKE_CASE ).convert('RGB' ) return image @require_torch @require_vision @slow class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) lowercase_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(__UpperCamelCase ) lowercase_ : int = prepare_img() lowercase_ : List[Any] = processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) with torch.no_grad(): lowercase_ : int = model(**__UpperCamelCase ) lowercase_ : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Optional[Any] = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) def _UpperCAmelCase ( self ) -> int: 
'''simple docstring''' lowercase_ : Union[str, Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) lowercase_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(__UpperCamelCase ) lowercase_ : Union[str, Any] = prepare_img() lowercase_ : List[Any] = processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) with torch.no_grad(): lowercase_ : Any = model(**__UpperCamelCase ) lowercase_ : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
371
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
0
'''simple docstring'''
import argparse

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=512,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
    )
    parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
322
'''simple docstring'''
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
322
1
'''simple docstring'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
322
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
322
1
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
322
'''simple docstring'''
import pytest


DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
322
1
'''simple docstring'''
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
322
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'

_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
322
1
'''simple docstring'''
import pytest


DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
322
'''simple docstring'''
from typing import List, Union

import numpy as np

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline


logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
322
1
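The record above implements the zero-shot-classification pipeline; its postprocess step is the part that turns raw NLI logits into label scores. A self-contained NumPy sketch of the two scoring modes, using made-up logits and assuming the conventional (contradiction, neutral, entailment) column order:

import numpy as np

# Made-up NLI logits for 3 candidate labels; columns are assumed to be
# ordered (contradiction, neutral, entailment).
logits = np.array([[2.0, 0.5, 1.0],
                   [0.1, 0.2, 3.0],
                   [1.5, 0.3, 0.2]])
contradiction_id, entailment_id = 0, 2

# multi_label=True: softmax entailment vs. contradiction for each label independently
pair = logits[..., [contradiction_id, entailment_id]]
multi_scores = (np.exp(pair) / np.exp(pair).sum(-1, keepdims=True))[..., 1]
print(multi_scores)        # three independent probabilities, need not sum to 1

# multi_label=False: softmax the entailment logit across all candidate labels
ent = logits[..., entailment_id]
single_scores = np.exp(ent) / np.exp(ent).sum(-1, keepdims=True)
print(single_scores, single_scores.sum())   # a distribution summing to 1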
'''simple docstring'''

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph, start, goal):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
322
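The queue.pop(0) calls in the record above are O(n) on a Python list, since every remaining element shifts left. collections.deque gives the same traversal with O(1) pops; a sketch of the path-finding function rewritten that way, reusing demo_graph from above (the _deque suffix is mine, to avoid shadowing the original):

from collections import deque

def bfs_shortest_path_deque(graph, start, goal):
    # Same algorithm as bfs_shortest_path above; deque.popleft() is O(1).
    if start == goal:
        return [start]
    explored = set()
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node not in explored:
            for neighbour in graph[node]:
                new_path = path + [neighbour]
                if neighbour == goal:
                    return new_path
                queue.append(new_path)
            explored.add(node)
    return []

print(bfs_shortest_path_deque(demo_graph, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']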
'''simple docstring'''


def solution(n=4_000_000):
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
322
1
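The loop above walks every Fibonacci number and filters out the even ones. Every third Fibonacci number is even, and the even terms satisfy E(k) = 4*E(k-1) + E(k-2) (2, 8, 34, 144, ...), so the odd terms can be skipped entirely; a sketch (even_fib_sum is a name I chose):

def even_fib_sum(limit=4_000_000):
    # Iterate over the even Fibonacci numbers only, using the recurrence
    # E(k) = 4 * E(k-1) + E(k-2) seeded with 2 and 8.
    total, a, b = 0, 2, 8
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

print(even_fib_sum())  # 4613732, the same answer as solution() above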
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
322
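The record above is Project Euler 101: fit a degree-(k-1) polynomial through the first k terms of a sequence and sum the first incorrect terms (FITs). Reusing interpolate from the record, the worked cube example u(n) = n**3 from the problem statement reproduces the FITs 15 and 58:

cube_values = [n**3 for n in range(1, 4)]   # u(1), u(2), u(3) for u(n) = n**3
op2 = interpolate(cube_values[:2])          # line through (1, 1) and (2, 8)
print(op2(3))                               # 15: the first incorrect term, since u(3) = 27
op3 = interpolate(cube_values[:3])          # quadratic through the first three values
print(op3(4))                               # 58, matching the problem statement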
'''simple docstring''' import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[Any] = parent _UpperCAmelCase : List[Any] = batch_size _UpperCAmelCase : Optional[int] = seq_length _UpperCAmelCase : int = is_training _UpperCAmelCase : Dict = use_attention_mask _UpperCAmelCase : Optional[Any] = use_token_type_ids _UpperCAmelCase : int = use_labels _UpperCAmelCase : Optional[int] = vocab_size _UpperCAmelCase : Any = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : Tuple = intermediate_size _UpperCAmelCase : int = hidden_act _UpperCAmelCase : int = hidden_dropout_prob _UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob _UpperCAmelCase : Union[str, Any] = max_position_embeddings _UpperCAmelCase : Tuple = type_vocab_size _UpperCAmelCase : List[Any] = type_sequence_label_size _UpperCAmelCase : Optional[int] = initializer_range _UpperCAmelCase : Dict = num_choices def lowerCAmelCase__ ( self : List[Any] ) ->Any: '''simple docstring''' _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : Dict = None if self.use_attention_mask: _UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Union[str, Any] = None if self.use_token_type_ids: _UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase : int = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase__ ( self : Any ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Tuple = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs _UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCAmelCase : Optional[int] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : int = FlaxAlbertModelTester(self ) @slow def lowerCAmelCase__ ( self : Any ) ->List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" ) _UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase__ ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" ) _UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) _UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0] _UpperCAmelCase : List[Any] = (1, 11, 7_68) self.assertEqual(output.shape , lowerCamelCase__ ) _UpperCAmelCase : str = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
322
1
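The test file above exercises the Flax ALBERT family end to end. Outside the test harness, the same integration boils down to a few lines; this sketch assumes flax is installed and the albert-base-v2 weights can be downloaded:

from transformers import AutoTokenizer, FlaxAlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")

inputs = tokenizer("Flax ALBERT in a few lines.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)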
'''simple docstring'''
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1_000):
    # Miller-Rabin probabilistic primality test with `prec` random witnesses.
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division: `d /= 2` would turn the exponent into a float
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
322
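The module above depends on a relative import for modular exponentiation, so it cannot run standalone. Python's built-in pow(a, d, n) computes exactly that, so a drop-in stand-in makes the test self-contained; a sketch under that substitution:

def bin_exp_mod(a, d, n):
    # Stand-in for the relative import above; the built-in pow already
    # performs modular binary exponentiation.
    return pow(a, d, n)

print([i for i in range(2, 50) if is_prime_big(i)])
# expected: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]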
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt') lowerCamelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def __lowerCAmelCase (__lowerCAmelCase ): with open(__lowerCAmelCase , "rb" ) as f: _UpperCAmelCase : List[str] = Image.open(__lowerCAmelCase ) return im.convert("RGB" ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." } , ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} ) lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} ) lowerCAmelCase : Optional[float] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) lowerCAmelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowerCAmelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def lowerCAmelCase__ ( self : int ) ->List[str]: '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." 
) @dataclass class lowerCAmelCase__ : lowerCAmelCase : str = field( default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase__ )} , ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) lowerCAmelCase : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowerCAmelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} ) lowerCAmelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) lowerCAmelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : str = torch.stack([example["pixel_values"] for example in examples] ) _UpperCAmelCase : Tuple = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def __lowerCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : Optional[Any] = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _UpperCAmelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: _UpperCAmelCase : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: _UpperCAmelCase : List[Any] = {} if data_args.train_dir is not None: _UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: _UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" ) _UpperCAmelCase : Any = load_dataset( "imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. _UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0: _UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split ) _UpperCAmelCase : List[str] = split["train"] _UpperCAmelCase : Union[str, Any] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names _UpperCAmelCase , _UpperCAmelCase : int = {}, {} for i, label in enumerate(__lowerCAmelCase ): _UpperCAmelCase : int = str(__lowerCAmelCase ) _UpperCAmelCase : str = label # Load the accuracy metric from the datasets package _UpperCAmelCase : int = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(__lowerCAmelCase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) _UpperCAmelCase : Dict = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) _UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: _UpperCAmelCase : int = image_processor.size["shortest_edge"] else: _UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"]) _UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) _UpperCAmelCase : Optional[int] = Compose( [ RandomResizedCrop(__lowerCAmelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _UpperCAmelCase : Union[str, Any] = Compose( [ Resize(__lowerCAmelCase ), CenterCrop(__lowerCAmelCase ), ToTensor(), normalize, ] ) def train_transforms(__lowerCAmelCase ): _UpperCAmelCase : Optional[Any] = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(__lowerCAmelCase ): _UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _UpperCAmelCase : Dict = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__lowerCAmelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _UpperCAmelCase : Optional[Any] = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__lowerCAmelCase ) # Initalize our trainer _UpperCAmelCase : Union[str, Any] = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase : Any = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : int = last_checkpoint _UpperCAmelCase : Dict = 
trainer.train(resume_from_checkpoint=__lowerCAmelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase : Dict = trainer.evaluate() trainer.log_metrics("eval" , __lowerCAmelCase ) trainer.save_metrics("eval" , __lowerCAmelCase ) # Write model card and (optionally) push to hub _UpperCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowerCAmelCase ) else: trainer.create_model_card(**__lowerCAmelCase ) if __name__ == "__main__": main()
322
1
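Two pieces of the training script above are easy to sanity-check in isolation: the torchvision transform chain and the collate function that stacks transformed examples into a batch. A minimal sketch with hypothetical size and normalization values (the real script reads them from the checkpoint's image processor):

import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

# Hypothetical stand-ins for values the script reads from the image processor.
size = 224
normalize = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
val_transforms = Compose([Resize(size), CenterCrop(size), ToTensor(), normalize])

def collate_fn(examples):
    # Mirrors the script's collate function: stack per-example tensors into a batch.
    pixel_values = torch.stack([ex["pixel_values"] for ex in examples])
    labels = torch.tensor([ex["labels"] for ex in examples])
    return {"pixel_values": pixel_values, "labels": labels}

batch = collate_fn([
    {"pixel_values": val_transforms(Image.new("RGB", (256, 256))), "labels": 0},
    {"pixel_values": val_transforms(Image.new("RGB", (320, 240))), "labels": 1},
])
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 224, 224])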
'''simple docstring'''
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(contents):
        return tok(contents, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
322
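A sketch of calling pack_examples directly, outside packer_cli. t5-small is just an example checkpoint, and torch must be installed since is_too_big tokenizes with return_tensors="pt"; the token budget is deliberately tiny so the packing decision is visible:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("t5-small")
src = ["first source line", "second source line", "third source line"]
tgt = ["first target", "second target", "third target"]

packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=16)
print(len(src), "->", len(packed_src))  # neighbouring pairs merged while they fit
print(packed_src)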
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase__ = logging.get_logger(__name__) # General docstring lowerCamelCase__ = 'RegNetConfig' # Base docstring lowerCamelCase__ = 'facebook/regnet-y-040' lowerCamelCase__ = [1, 1_088, 7, 7] # Image classification docstring lowerCamelCase__ = 'facebook/regnet-y-040' lowerCamelCase__ = 'tabby, tabby cat' lowerCamelCase__ = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCAmelCase : Dict = tf.keras.layers.ConvaD( filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , ) _UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) _UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any: '''simple docstring''' _UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) ) _UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ ) _UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = config.num_channels _UpperCAmelCase : Any = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict: '''simple docstring''' _UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) ) _UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : int = tf.keras.layers.ConvaD( filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" ) _UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor: '''simple docstring''' return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ ) class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" ) _UpperCAmelCase : int = [ tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ ) for layer_module in self.attention: _UpperCAmelCase : str = layer_module(lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1 _UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width ) _UpperCAmelCase : List[str] = ( TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCAmelCase : Optional[int] = [ TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ), ] _UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : Any = hidden_state for layer_module in self.layers: _UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ ) hidden_state += residual _UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1 _UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width ) _UpperCAmelCase : Union[str, Any] = ( TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) _UpperCAmelCase : List[Any] = [ TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ), ] _UpperCAmelCase : int = ACTaFN[config.hidden_act] def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any: '''simple docstring''' _UpperCAmelCase : int = hidden_state for layer_module in self.layers: _UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ ) _UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ ) hidden_state += residual _UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer _UpperCAmelCase : List[str] = [ # downsampling is done in the first layer with stride of 2 layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ), *[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]: '''simple docstring''' for layer_module in self.layers: _UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , 
**lowerCamelCase__ : int ) ->Dict: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : Union[str, Any] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) _UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) ) def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention: '''simple docstring''' _UpperCAmelCase : Optional[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,) _UpperCAmelCase : Dict = stage_module(lowerCamelCase__ ) if output_hidden_states: _UpperCAmelCase : Tuple = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ ) @keras_serializable class lowerCAmelCase__ ( tf.keras.layers.Layer ): lowerCAmelCase : Optional[Any] = RegNetConfig def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : Union[str, Any] = config _UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" ) _UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" ) _UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" ) @unpack_inputs def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' _UpperCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ ) _UpperCAmelCase : str = self.encoder( lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ ) _UpperCAmelCase : Dict = encoder_outputs[0] _UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ ) # Change to NCHW output format have uniformity in the modules _UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) _UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( 
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : Tuple = RegNetConfig lowerCAmelCase : Tuple = "regnet" lowerCAmelCase : Union[str, Any] = "pixel_values" @property def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )} lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCamelCase__ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , ) class lowerCAmelCase__ ( UpperCAmelCase__ ): def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]: '''simple docstring''' super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(lowerCamelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: '''simple docstring''' _UpperCAmelCase : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : Union[str, Any] = self.regnet( pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCAmelCase__ , ) class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ): def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any: '''simple docstring''' super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) _UpperCAmelCase : Optional[int] = config.num_labels _UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" ) # classification head _UpperCAmelCase : str = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowerCamelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: '''simple docstring''' _UpperCAmelCase : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : Union[str, Any] = self.regnet( lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ ) _UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ ) _UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ ) _UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ ) if not return_dict: _UpperCAmelCase : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
322
1
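A short usage sketch for the model family defined above, going through the public transformers classes; the random uint8 array merely stands in for a real photo, so the predicted label is meaningless:

import numpy as np
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # fake photo
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits          # shape (1, 1000)
predicted = int(np.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])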
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : str = (DEISMultistepScheduler,) lowerCAmelCase : Tuple = (("num_inference_steps", 25),) def lowerCAmelCase__ ( self : str , **lowerCamelCase__ : Any ) ->str: '''simple docstring''' _UpperCAmelCase : Any = { "num_train_timesteps": 10_00, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, } config.update(**lowerCamelCase__ ) return config def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any=0 , **lowerCamelCase__ : int ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : str = dict(self.forward_default_kwargs ) _UpperCAmelCase : List[Any] = kwargs.pop("num_inference_steps" , lowerCamelCase__ ) _UpperCAmelCase : Dict = self.dummy_sample _UpperCAmelCase : List[Any] = 0.1 * sample _UpperCAmelCase : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase : List[Any] = self.get_scheduler_config(**lowerCamelCase__ ) _UpperCAmelCase : List[Any] = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals _UpperCAmelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) _UpperCAmelCase : Optional[int] = scheduler_class.from_pretrained(lowerCamelCase__ ) new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals _UpperCAmelCase : int = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase : str = sample, sample for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase : int = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample _UpperCAmelCase : Dict = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase__ ( self : List[str] ) ->Tuple: '''simple docstring''' pass def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[int]=0 , **lowerCamelCase__ : Union[str, Any] ) ->Any: '''simple docstring''' _UpperCAmelCase : str = dict(self.forward_default_kwargs ) _UpperCAmelCase : int = kwargs.pop("num_inference_steps" , lowerCamelCase__ ) _UpperCAmelCase : Tuple = self.dummy_sample _UpperCAmelCase : int = 0.1 * sample _UpperCAmelCase : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase : Dict = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase : str = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase : List[Any] = 
dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase : Optional[int] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample _UpperCAmelCase : Optional[int] = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Any ) ->str: '''simple docstring''' if scheduler is None: _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : List[str] = self.get_scheduler_config(**lowerCamelCase__ ) _UpperCAmelCase : Tuple = scheduler_class(**lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0] _UpperCAmelCase : Optional[Any] = self.get_scheduler_config(**lowerCamelCase__ ) _UpperCAmelCase : str = scheduler_class(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = 10 _UpperCAmelCase : Dict = self.dummy_model() _UpperCAmelCase : Tuple = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Tuple = model(lowerCamelCase__ , lowerCamelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample return sample def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[str] = dict(self.forward_default_kwargs ) _UpperCAmelCase : List[str] = kwargs.pop("num_inference_steps" , lowerCamelCase__ ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase : Optional[Any] = self.get_scheduler_config() _UpperCAmelCase : Any = scheduler_class(**lowerCamelCase__ ) _UpperCAmelCase : int = self.dummy_sample _UpperCAmelCase : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase__ , "set_timesteps" ): scheduler.set_timesteps(lowerCamelCase__ ) elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , "set_timesteps" ): _UpperCAmelCase : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] _UpperCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order] _UpperCAmelCase : Optional[int] = scheduler.timesteps[5] _UpperCAmelCase : List[str] = scheduler.timesteps[6] _UpperCAmelCase : Any = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample _UpperCAmelCase : Tuple = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]: '''simple docstring''' _UpperCAmelCase : Dict = DEISMultistepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase : int = self.full_loop(scheduler=lowerCamelCase__ ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3 _UpperCAmelCase : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase : Optional[int] = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase : Optional[int] = 
DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase : Optional[int] = self.full_loop(scheduler=lowerCamelCase__ ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3 def lowerCAmelCase__ ( self : List[str] ) ->Dict: '''simple docstring''' for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]: '''simple docstring''' self.check_over_configs(thresholding=lowerCamelCase__ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="deis" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def lowerCAmelCase__ ( self : Dict ) ->Any: '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , ) _UpperCAmelCase : Any = self.full_loop( solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , ) assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers" def lowerCAmelCase__ ( self : Optional[int] ) ->Dict: '''simple docstring''' self.check_over_configs(lower_order_final=lowerCamelCase__ ) self.check_over_configs(lower_order_final=lowerCamelCase__ ) def lowerCAmelCase__ ( self : Tuple ) ->List[Any]: '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 ) def lowerCAmelCase__ ( self : List[Any] ) ->Any: '''simple docstring''' _UpperCAmelCase : List[str] = self.full_loop() _UpperCAmelCase : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1E-3 def lowerCAmelCase__ ( self : str ) ->List[Any]: '''simple docstring''' _UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1E-3 def lowerCAmelCase__ ( self : List[str] ) ->Any: '''simple docstring''' _UpperCAmelCase : Dict = self.scheduler_classes[0] _UpperCAmelCase : Dict = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 ) _UpperCAmelCase : Any = scheduler_class(**lowerCamelCase__ ) _UpperCAmelCase : List[Any] = 10 _UpperCAmelCase : Union[str, Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(lowerCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Tuple = model(lowerCamelCase__ , lowerCamelCase__ ) _UpperCAmelCase : Optional[int] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample assert sample.dtype == torch.floataa
322
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __lowerCAmelCase (__lowerCAmelCase ): if is_torch_version("<" , "2.0.0" ) or not hasattr(__lowerCAmelCase , "_dynamo" ): return False return isinstance(__lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule ) def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = True ): _UpperCAmelCase : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _UpperCAmelCase : Dict = is_compiled_module(__lowerCAmelCase ) if is_compiled: _UpperCAmelCase : Optional[int] = model _UpperCAmelCase : Any = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Any = model.module if not keep_fpaa_wrapper: _UpperCAmelCase : List[Any] = getattr(__lowerCAmelCase , "forward" ) _UpperCAmelCase : Dict = model.__dict__.pop("_original_forward" , __lowerCAmelCase ) if original_forward is not None: while hasattr(__lowerCAmelCase , "__wrapped__" ): _UpperCAmelCase : Optional[int] = forward.__wrapped__ if forward == original_forward: break _UpperCAmelCase : Dict = forward if getattr(__lowerCAmelCase , "_converted_to_transformer_engine" , __lowerCAmelCase ): convert_model(__lowerCAmelCase , to_transformer_engine=__lowerCAmelCase ) if is_compiled: _UpperCAmelCase : int = model _UpperCAmelCase : str = compiled_model return model def __lowerCAmelCase (): PartialState().wait_for_everyone() def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): if PartialState().distributed_type == DistributedType.TPU: xm.save(__lowerCAmelCase , __lowerCAmelCase ) elif PartialState().local_process_index == 0: torch.save(__lowerCAmelCase , __lowerCAmelCase ) @contextmanager def __lowerCAmelCase (**__lowerCAmelCase ): for key, value in kwargs.items(): _UpperCAmelCase : str = str(__lowerCAmelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __lowerCAmelCase (__lowerCAmelCase ): if not hasattr(__lowerCAmelCase , "__qualname__" ) and not hasattr(__lowerCAmelCase , "__name__" ): _UpperCAmelCase : List[str] = getattr(__lowerCAmelCase , "__class__" , __lowerCAmelCase ) if hasattr(__lowerCAmelCase , "__qualname__" ): return obj.__qualname__ if hasattr(__lowerCAmelCase , "__name__" ): return obj.__name__ return str(__lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): for key, value in source.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Any = destination.setdefault(__lowerCAmelCase , {} ) merge_dicts(__lowerCAmelCase , __lowerCAmelCase ) else: _UpperCAmelCase : Optional[int] = value return destination def __lowerCAmelCase (__lowerCAmelCase = None ): if port is None: _UpperCAmelCase : Tuple = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
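# Minimal sketch of the unwrapping loop above, without the DeepSpeed and
# torch.compile branches: peel (Distributed)DataParallel wrappers until the
# bare module is reached.
import torch


def unwrap(model: torch.nn.Module) -> torch.nn.Module:
    wrappers = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    while isinstance(model, wrappers):
        model = model.module
    return model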
322
1
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCamelCase__ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCamelCase__ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCamelCase__ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCamelCase__ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] ) ->int: '''simple docstring''' import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=0.9 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Dict=0.5 ) ->Any: '''simple docstring''' if NLTK_VERSION >= version.Version("3.6.5" ): _UpperCAmelCase : Dict = [ meteor_score.single_meteor_score( word_tokenize(lowerCamelCase__ ) , word_tokenize(lowerCamelCase__ ) , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ ) for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ ) ] else: _UpperCAmelCase : Optional[int] = [ meteor_score.single_meteor_score(lowerCamelCase__ , lowerCamelCase__ , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ ) for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ ) ] return {"meteor": np.mean(lowerCamelCase__ )}
322
'''Check whether a non-negative integer is a power of two.'''


def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    # A power of two has exactly one set bit, so n & (n - 1) clears it to 0.
    return (number & (number - 1)) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
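# Illustrative checks (not part of the original module): powers of two pass,
# other values fail. Note that 0 & -1 == 0, so zero would also pass -- guard
# it separately if 0 must be rejected.
assert is_power_of_two(1) and is_power_of_two(64)
assert not is_power_of_two(6)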
322
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { 'configuration_instructblip': [ 'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InstructBlipConfig', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig', ], 'processing_instructblip': ['InstructBlipProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ 'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'InstructBlipQFormerModel', 'InstructBlipPreTrainedModel', 'InstructBlipForConditionalGeneration', 'InstructBlipVisionModel', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
322
'''Check whether a number equals the sum of its proper divisors (a perfect number).'''


def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
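# Quick sanity check (illustrative, not in the original file): the perfect
# numbers below 1_000 are exactly 6, 28 and 496.
assert [n for n in range(1, 1_000) if perfect(n)] == [6, 28, 496]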
322
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : List[str] = "openai/whisper-base" lowerCAmelCase : Optional[int] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) lowerCAmelCase : Optional[int] = "transcriber" lowerCAmelCase : List[str] = WhisperProcessor lowerCAmelCase : Optional[Any] = WhisperForConditionalGeneration lowerCAmelCase : List[Any] = ["audio"] lowerCAmelCase : List[str] = ["text"] def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Optional[Any]: '''simple docstring''' return self.pre_processor(lowerCamelCase__ , return_tensors="pt" ).input_features def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict ) ->str: '''simple docstring''' return self.model.generate(inputs=lowerCamelCase__ ) def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Dict ) ->int: '''simple docstring''' return self.pre_processor.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )[0]
322
'''Evaluate a polynomial, given its coefficients in ascending order of degree.'''

from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Naive evaluation: sum c_i * x**i over all coefficients.
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's rule: fold from the highest-degree coefficient down,
    # multiplying by x once per step instead of computing x**i each time.
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5x^2 + 9.3x^3 + 7x^4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
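# Both evaluators agree on a small case (illustrative): 3 + 2*x**2 at x = 2.
coeffs = (3.0, 0.0, 2.0)
assert evaluate_poly(coeffs, 2.0) == horner(coeffs, 2.0) == 11.0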
322
1
'''simple docstring''' import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) lowerCamelCase__ = logging.getLogger() def __lowerCAmelCase (): _UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument("-f" ) _UpperCAmelCase : int = parser.parse_args() return args.f class lowerCAmelCase__ ( UpperCAmelCase__ ): def lowerCAmelCase__ ( self : str ) ->None: '''simple docstring''' _UpperCAmelCase : Dict = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCamelCase__ ) def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] ) ->Any: '''simple docstring''' _UpperCAmelCase : int = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , "run_glue_deebert.py" ) with patch.object(lowerCamelCase__ , "argv" , lowerCamelCase__ ): _UpperCAmelCase : Any = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowerCamelCase__ , 0.6_6_6 ) @slow @require_torch_non_multi_gpu def lowerCAmelCase__ ( self : str ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : Optional[int] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(lowerCamelCase__ ) _UpperCAmelCase : Union[str, Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowerCamelCase__ ) _UpperCAmelCase : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(lowerCamelCase__ )
322
'''Split a list into two subsets whose sums are as close as possible; return the difference.'''


def find_min(arr):
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip element i, or include it when it fits.
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # One side should sum as close to s/2 as possible; scan down from there.
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
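# Example (illustrative): [1, 6, 11, 5] splits into {1, 5, 6} and {11},
# so the minimum difference is |12 - 11| = 1.
assert find_min([1, 6, 11, 5]) == 1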
322
1
'''Task template describing a text-summarization dataset schema.'''

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
322
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase : int = "resnet" lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"] def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" ) _UpperCAmelCase : str = num_channels _UpperCAmelCase : List[str] = embedding_size _UpperCAmelCase : Tuple = hidden_sizes _UpperCAmelCase : Dict = depths _UpperCAmelCase : List[Any] = layer_type _UpperCAmelCase : Optional[int] = hidden_act _UpperCAmelCase : Tuple = downsample_in_first_stage _UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )] _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices( out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names ) class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : Optional[Any] = version.parse("1.11" ) @property def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowerCAmelCase__ ( self : str ) ->float: '''simple docstring''' return 1E-3
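# Hedged example (assumes the `transformers` package is installed): build a
# small basic-block variant of the config above by overriding a few defaults;
# unspecified fields keep the values shown in __init__.
from transformers import ResNetConfig

config = ResNetConfig(layer_type="basic", depths=[2, 2], hidden_sizes=[64, 128])
assert config.layer_type in config.layer_types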
322
1
'''simple docstring''' import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowerCamelCase__ = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } lowerCamelCase__ = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' lowerCamelCase__ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Union[str, Any] = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __lowerCAmelCase (__lowerCAmelCase ): return x[0] def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : int = get_letter_count(__lowerCAmelCase ) _UpperCAmelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCAmelCase ) _UpperCAmelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=__lowerCAmelCase ) _UpperCAmelCase : Union[str, Any] = "".join(freq_to_letter[freq] ) _UpperCAmelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCAmelCase , reverse=__lowerCAmelCase ) _UpperCAmelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Any = get_frequency_order(__lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
322
'''simple docstring''' from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor lowerCamelCase__ = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __lowerCAmelCase (__lowerCAmelCase ): if isinstance(__lowerCAmelCase , torch.Tensor ): return image elif isinstance(__lowerCAmelCase , PIL.Image.Image ): _UpperCAmelCase : int = [image] _UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image] _UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase ) return image class lowerCAmelCase__ ( UpperCAmelCase__ ): def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int: '''simple docstring''' super().__init__() # make sure scheduler can always be converted to DDIM _UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ ) def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]: '''simple docstring''' if strength < 0 or strength > 1: raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" ) def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ ) _UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 ) _UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str: '''simple docstring''' if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" ) _UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch""" F""" size of {batch_size}. 
Make sure the batch size matches the length of the generators.""" ) _UpperCAmelCase : List[str] = init_latents.shape _UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ ) # get latents print("add noise to latents at timestep" , lowerCamelCase__ ) _UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) _UpperCAmelCase : List[Any] = init_latents return latents @torch.no_grad() def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]: '''simple docstring''' self.check_inputs(lowerCamelCase__ ) # 2. Preprocess image _UpperCAmelCase : Dict = preprocess(lowerCamelCase__ ) # 3. set timesteps self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device ) _UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device ) _UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ ) # 4. Prepare latent variables _UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ ) _UpperCAmelCase : Any = latents # 5. Denoising loop for t in self.progress_bar(lowerCamelCase__ ): # 1. predict noise model_output _UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _UpperCAmelCase : int = self.scheduler.step( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample _UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=lowerCamelCase__ )
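# The strength -> timestep arithmetic from get_timesteps above, in isolation:
# with 50 inference steps and strength 0.8, denoising starts 40 steps from
# the end of the schedule (t_start = 10).
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
assert (init_timestep, t_start) == (40, 10)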
322
1
'''Split a list into two subsets whose sums are as close as possible; return the difference.'''


def find_min(arr):
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip element i, or include it when it fits.
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # One side should sum as close to s/2 as possible; scan down from there.
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
322
'''simple docstring''' from __future__ import annotations from collections.abc import Callable lowerCamelCase__ = list[list[float | int]] def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : int = len(__lowerCAmelCase ) _UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )] _UpperCAmelCase : int _UpperCAmelCase : int _UpperCAmelCase : int _UpperCAmelCase : int _UpperCAmelCase : int _UpperCAmelCase : float for row in range(__lowerCAmelCase ): for col in range(__lowerCAmelCase ): _UpperCAmelCase : Optional[Any] = matrix[row][col] _UpperCAmelCase : Optional[int] = vector[row][0] _UpperCAmelCase : int = 0 _UpperCAmelCase : Union[str, Any] = 0 while row < size and col < size: # pivoting _UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __lowerCAmelCase ): _UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col] _UpperCAmelCase : Optional[Any] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __lowerCAmelCase ): for row in range(__lowerCAmelCase ): _UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col] for cola in range(__lowerCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase ) ] def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : int = len(__lowerCAmelCase ) _UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )] _UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )] _UpperCAmelCase : Matrix _UpperCAmelCase : int _UpperCAmelCase : int _UpperCAmelCase : int for x_val, y_val in enumerate(__lowerCAmelCase ): for col in range(__lowerCAmelCase ): _UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1) _UpperCAmelCase : int = y_val _UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase ) def interpolated_func(__lowerCAmelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__lowerCAmelCase ) ) return interpolated_func def __lowerCAmelCase (__lowerCAmelCase ): return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ): _UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )] _UpperCAmelCase : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _UpperCAmelCase : int = 0 _UpperCAmelCase : Callable[[int], int] _UpperCAmelCase : int for poly in polynomials: _UpperCAmelCase : int = 1 while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ): x_val += 1 ret += poly(__lowerCAmelCase ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
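# Illustrative check of the Gaussian-elimination helper (named `solve` at its
# call site above): the system x + y = 3, x - y = 1 has solution x = 2, y = 1.
assert solve([[1, 1], [1, -1]], [[3], [1]]) == [[2.0], [1.0]]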
322
1
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'tensor(bool)': np.bool_, 'tensor(int8)': np.inta, 'tensor(uint8)': np.uinta, 'tensor(int16)': np.intaa, 'tensor(uint16)': np.uintaa, 'tensor(int32)': np.intaa, 'tensor(uint32)': np.uintaa, 'tensor(int64)': np.intaa, 'tensor(uint64)': np.uintaa, 'tensor(float16)': np.floataa, 'tensor(float)': np.floataa, 'tensor(double)': np.floataa, } class lowerCAmelCase__ : def __init__( self : int , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]: '''simple docstring''' logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) _UpperCAmelCase : int = model _UpperCAmelCase : List[Any] = kwargs.get("model_save_dir" , lowerCamelCase__ ) _UpperCAmelCase : int = kwargs.get("latest_model_name" , lowerCamelCase__ ) def __call__( self : Union[str, Any] , **lowerCamelCase__ : Optional[Any] ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : List[str] = {k: np.array(lowerCamelCase__ ) for k, v in kwargs.items()} return self.model.run(lowerCamelCase__ , lowerCamelCase__ ) @staticmethod def lowerCAmelCase__ ( lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Any=None , lowerCamelCase__ : Union[str, Any]=None ) ->Optional[int]: '''simple docstring''' if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) _UpperCAmelCase : Any = "CPUExecutionProvider" return ort.InferenceSession(lowerCamelCase__ , providers=[provider] , sess_options=lowerCamelCase__ ) def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : Any ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME _UpperCAmelCase : Dict = self.model_save_dir.joinpath(self.latest_model_name ) _UpperCAmelCase : List[str] = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ ) try: shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) _UpperCAmelCase : Dict = self.model_save_dir.joinpath(lowerCamelCase__ ) if src_path.exists(): _UpperCAmelCase : List[Any] = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ ) try: shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ ) except shutil.SameFileError: pass def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : Union[str, Any] , ) ->int: '''simple docstring''' if os.path.isfile(lowerCamelCase__ ): logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) # saving model weights/files self._save_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) @classmethod def lowerCAmelCase__ ( cls : Any , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[Union[bool, str, None]] = None , lowerCamelCase__ : Optional[Union[str, None]] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional["ort.SessionOptions"] = None 
, **lowerCamelCase__ : List[str] , ) ->str: '''simple docstring''' _UpperCAmelCase : List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(lowerCamelCase__ ): _UpperCAmelCase : Any = OnnxRuntimeModel.load_model( os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ ) _UpperCAmelCase : Dict = Path(lowerCamelCase__ ) # load model from hub else: # download model _UpperCAmelCase : Optional[int] = hf_hub_download( repo_id=lowerCamelCase__ , filename=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , ) _UpperCAmelCase : Any = Path(lowerCamelCase__ ).parent _UpperCAmelCase : List[Any] = Path(lowerCamelCase__ ).name _UpperCAmelCase : int = OnnxRuntimeModel.load_model(lowerCamelCase__ , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ ) return cls(model=lowerCamelCase__ , **lowerCamelCase__ ) @classmethod def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : Dict , ) ->Dict: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = None if len(str(lowerCamelCase__ ).split("@" ) ) == 2: _UpperCAmelCase , _UpperCAmelCase : Dict = model_id.split("@" ) return cls._from_pretrained( model_id=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , **lowerCamelCase__ , )
322
'''Re-export the test utilities so they can be imported from a single module.'''

from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
322
1
'''Kahn's algorithm: topologically sort a DAG given as an adjacency list.'''


def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Seed the queue with every vertex that has no incoming edge.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
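# Variant sketch (not in the original file): return the order instead of
# printing it, which is easier to test; None signals a cycle.
def topological_sort_list(graph):
    indegree = {u: 0 for u in graph}
    for values in graph.values():
        for v in values:
            indegree[v] += 1
    queue = [u for u, d in indegree.items() if d == 0]
    order = []
    while queue:
        u = queue.pop(0)
        order.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return order if len(order) == len(graph) else None


assert topological_sort_list({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) == [0, 1, 2, 3, 4, 5]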
322
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCamelCase__ = TypeVar('T') class lowerCAmelCase__ ( Generic[T] ): def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple: '''simple docstring''' _UpperCAmelCase : Dict = data _UpperCAmelCase : Node[T] | None = None def __str__( self : Any ) ->str: '''simple docstring''' return F"""{self.data}""" class lowerCAmelCase__ ( Generic[T] ): def __init__( self : Tuple ) ->None: '''simple docstring''' _UpperCAmelCase : Node[T] | None = None def __iter__( self : List[str] ) ->Iterator[T]: '''simple docstring''' _UpperCAmelCase : Any = self.top while node: yield node.data _UpperCAmelCase : Dict = node.next def __str__( self : Dict ) ->str: '''simple docstring''' return "->".join([str(lowerCamelCase__ ) for item in self] ) def __len__( self : Optional[int] ) ->int: '''simple docstring''' return len(tuple(iter(self ) ) ) def lowerCAmelCase__ ( self : List[Any] ) ->bool: '''simple docstring''' return self.top is None def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None: '''simple docstring''' _UpperCAmelCase : List[Any] = Node(lowerCamelCase__ ) if not self.is_empty(): _UpperCAmelCase : Tuple = self.top _UpperCAmelCase : List[str] = node def lowerCAmelCase__ ( self : Union[str, Any] ) ->T: '''simple docstring''' if self.is_empty(): raise IndexError("pop from empty stack" ) assert isinstance(self.top , lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = self.top _UpperCAmelCase : Optional[Any] = self.top.next return pop_node.data def lowerCAmelCase__ ( self : Union[str, Any] ) ->T: '''simple docstring''' if self.is_empty(): raise IndexError("peek from empty stack" ) assert self.top is not None return self.top.data def lowerCAmelCase__ ( self : List[Any] ) ->None: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = None if __name__ == "__main__": from doctest import testmod testmod()
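# Usage sketch; the Generic[T] container above is assumed to be named `Stack`
# (its class name is garbled in this dump).
s = Stack()
s.push(1)
s.push(2)
assert len(s) == 2 and s.peek() == 2
assert s.pop() == 2 and str(s) == "1"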
322
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp lowerCamelCase__ = { 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } lowerCamelCase__ = { 'RUCAIBox/mvp': 1_024, } class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : Any = ["input_ids", "attention_mask"] lowerCAmelCase : List[Any] = MvpTokenizer def __init__( self : Any , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Any="replace" , lowerCamelCase__ : List[Any]="<s>" , lowerCamelCase__ : Dict="</s>" , lowerCamelCase__ : List[Any]="</s>" , lowerCamelCase__ : str="<s>" , lowerCamelCase__ : List[Any]="<unk>" , lowerCamelCase__ : Tuple="<pad>" , lowerCamelCase__ : int="<mask>" , lowerCamelCase__ : str=False , lowerCamelCase__ : Optional[Any]=True , **lowerCamelCase__ : Any , ) ->str: '''simple docstring''' super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) _UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: _UpperCAmelCase : Optional[int] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) _UpperCAmelCase : List[str] = add_prefix_space _UpperCAmelCase : Any = pre_tok_class(**lowerCamelCase__ ) _UpperCAmelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _UpperCAmelCase : Dict = "post_processor" _UpperCAmelCase : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: _UpperCAmelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase : Any = tuple(state["sep"] ) if "cls" in state: _UpperCAmelCase : int = tuple(state["cls"] ) _UpperCAmelCase : int = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: _UpperCAmelCase : Dict = add_prefix_space _UpperCAmelCase : List[Any] = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: _UpperCAmelCase : str = trim_offsets 
_UpperCAmelCase : Dict = True if changes_to_apply: _UpperCAmelCase : Optional[int] = getattr(lowerCamelCase__ , state.pop("type" ) ) _UpperCAmelCase : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def lowerCAmelCase__ ( self : Optional[Any] ) ->str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value _UpperCAmelCase : Union[str, Any] = value def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : int ) ->BatchEncoding: '''simple docstring''' _UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def lowerCAmelCase__ ( self : Optional[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ) ->BatchEncoding: '''simple docstring''' _UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int=None ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]: '''simple docstring''' _UpperCAmelCase : List[Any] = [self.sep_token_id] _UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
322
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class lowerCAmelCase__ ( UpperCAmelCase__ ): lowerCAmelCase : int = "speech_to_text_2" lowerCAmelCase : str = ["past_key_values"] lowerCAmelCase : int = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Optional[Any] , lowerCamelCase__ : Tuple=1_00_00 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=10_24 , **lowerCamelCase__ : str , ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : Any = vocab_size _UpperCAmelCase : Optional[int] = d_model _UpperCAmelCase : List[Any] = decoder_ffn_dim _UpperCAmelCase : Any = decoder_layers _UpperCAmelCase : int = decoder_attention_heads _UpperCAmelCase : Any = dropout _UpperCAmelCase : List[Any] = attention_dropout _UpperCAmelCase : Optional[int] = activation_dropout _UpperCAmelCase : List[Any] = activation_function _UpperCAmelCase : int = init_std _UpperCAmelCase : Dict = decoder_layerdrop _UpperCAmelCase : str = use_cache _UpperCAmelCase : Union[str, Any] = decoder_layers _UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True _UpperCAmelCase : Any = max_target_positions super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
322
1
'''Close or flag stale GitHub issues on the huggingface/accelerate repository.'''

import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda c: c.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
322
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCamelCase__ = logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu' def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ): _UpperCAmelCase : Any = text.split(__lowerCAmelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Dict = [], [] for title, text in zip(documents["title"] , documents["text"] ): if text is not None: for passage in split_text(__lowerCAmelCase ): titles.append(title if title is not None else "" ) texts.append(__lowerCAmelCase ) return {"title": titles, "text": texts} def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : str = ctx_tokenizer( documents["title"] , documents["text"] , truncation=__lowerCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"] _UpperCAmelCase : str = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): ###################################### logger.info("Step 1 - Create the dataset" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way _UpperCAmelCase : Optional[int] = load_dataset( "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words _UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc ) # And compute the embeddings _UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) _UpperCAmelCase : Dict = Features( {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space _UpperCAmelCase : int = dataset.map( partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , ) # And finally save your dataset _UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , 
"my_knowledge_dataset" ) dataset.save_to_disk(__lowerCAmelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index the dataset" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search _UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase ) # And save the index _UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" ) dataset.get_index("embeddings" ).save(__lowerCAmelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowerCAmelCase__ : lowerCAmelCase : str = field( default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) lowerCAmelCase : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) lowerCAmelCase : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) lowerCAmelCase : Optional[str] = field( default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) lowerCAmelCase : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) lowerCAmelCase : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCamelCase__ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
322
1
'''Monte Carlo methods: estimate pi and definite integrals by random sampling.'''

from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
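# Illustrative run (not in the original __main__ guard): Monte Carlo error
# shrinks as O(1/sqrt(n)), so 100x the samples buys roughly one extra digit.
pi_estimator(10_000)
area_under_line_estimator_check(10_000)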
'''simple docstring''' import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 lowerCamelCase__ = { 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 128, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 50, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 10, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 10, 'exponential_decay_length_penalty': (5, 1.01), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): @classmethod def lowerCAmelCase__ ( cls : List[str] ) ->str: '''simple docstring''' _UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(lowerCamelCase__ ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int: '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-config-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-config" ) except HTTPError: pass def lowerCAmelCase__ ( self : int ) ->Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("test-config" , use_auth_token=self._token ) _UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token ) _UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ 
) ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : str = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token ) _UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token ) _UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) def lowerCAmelCase__ ( self : List[str] ) ->Any: '''simple docstring''' CustomConfig.register_for_auto_class() _UpperCAmelCase : int = CustomConfig(attribute=42 ) config.push_to_hub("test-dynamic-config" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} ) _UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , "CustomConfig" ) self.assertEqual(new_config.attribute , 42 ) class lowerCAmelCase__ ( unittest.TestCase ): def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : Tuple = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated _UpperCAmelCase : Any = c.n_embd + 1 # int _UpperCAmelCase : List[Any] = c.resid_pdrop + 1.0 # float _UpperCAmelCase : Tuple = not c.scale_attn_weights # bool _UpperCAmelCase : List[Any] = c.summary_type + "foo" # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" ) self.assertEqual(lowerCamelCase__ , c.n_embd , "mismatch for key: n_embd" ) self.assertEqual(lowerCamelCase__ , c.resid_pdrop , "mismatch for key: resid_pdrop" ) self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" ) self.assertEqual(lowerCamelCase__ , c.summary_type , "mismatch for key: summary_type" ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Any = PretrainedConfig() _UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) _UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )] if len(lowerCamelCase__ ) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" F""" {', '.join(lowerCamelCase__ )}.""" ) def lowerCAmelCase__ ( self : Optional[int] ) ->int: '''simple docstring''' with self.assertRaises(lowerCamelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" ) _UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" ) self.assertIsNotNone(lowerCamelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = mock.Mock() _UpperCAmelCase : List[str] = 5_00 _UpperCAmelCase : Dict = {} _UpperCAmelCase : Tuple = HTTPError _UpperCAmelCase : Any = {} # Download this model to make sure it's in the cache. _UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head: _UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase__ ( self : Optional[int] ) ->Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" ) _UpperCAmelCase : str = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCamelCase__ ) _UpperCAmelCase : Dict = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 _UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 _UpperCAmelCase : Dict = ["config.42.0.0.json"] _UpperCAmelCase : Union[str, Any] = 7_68 configuration.save_pretrained(lowerCamelCase__ ) shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) ) _UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(new_configuration.hidden_size , 7_68 ) def lowerCAmelCase__ ( self : List[str] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs" import transformers as new_transformers _UpperCAmelCase : Any = "v4.0.0" _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCamelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers _UpperCAmelCase : List[Any] = "v3.0.0" _UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertEqual(old_configuration.hidden_size , 7_68 )
class Graph:
    """Undirected weighted graph stored as adjacency dictionaries."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct (Boruvka's algorithm assumes distinct weights)."""
        edges = self.get_edges()
        # Drop the reverse copy of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # Equal ranks: attach root2 under root1 and bump root1's rank.
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reverse copy of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            # For every current component, remember its cheapest outgoing edge.
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
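# ---------------------------------------------------------------------------
# Usage sketch for Graph.boruvka_mst (the graph below is illustrative): build
# a small weighted graph, force distinct weights, and extract its MST.
g = Graph.build(
    vertices=[0, 1, 2, 3],
    edges=[(0, 1, 1), (0, 2, 2), (2, 3, 2), (0, 3, 3)],
)
g.distinct_weight()  # Boruvka's algorithm assumes distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)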
'''simple docstring''' from manim import * class lowerCAmelCase__ ( UpperCAmelCase__ ): def lowerCAmelCase__ ( self : List[Any] ) ->str: '''simple docstring''' _UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 ) _UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) _UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )] _UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )] _UpperCAmelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) _UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) _UpperCAmelCase : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) _UpperCAmelCase : int = Text("CPU" , font_size=24 ) _UpperCAmelCase : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )] _UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) _UpperCAmelCase : int = Text("GPU" , font_size=24 ) _UpperCAmelCase : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) gpu.align_to(lowerCamelCase__ , lowerCamelCase__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowerCamelCase__ ) _UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )] _UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) _UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 ) _UpperCAmelCase : Tuple = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , ) _UpperCAmelCase : int = MarkupText( F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , ) _UpperCAmelCase : Any = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _UpperCAmelCase : Union[str, Any] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) ) self.add(lowerCamelCase__ ) _UpperCAmelCase : int = [] _UpperCAmelCase : List[str] = [] _UpperCAmelCase : Dict = [] for i, rect in enumerate(lowerCamelCase__ ): _UpperCAmelCase : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 ) cpu_target.move_to(lowerCamelCase__ ) cpu_target.generate_target() _UpperCAmelCase : Dict = 0.4_6 / 4 _UpperCAmelCase : Any = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 ) cpu_targs.append(lowerCamelCase__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) ) second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) ) self.play(*lowerCamelCase__ ) 
self.play(*lowerCamelCase__ ) self.wait()
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """Set each passed object to None, run garbage collection, and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an OOM error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
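# ---------------------------------------------------------------------------
# Usage sketch for the decorator above (training-loop body elided): the
# wrapped function must accept the batch size as its first parameter, and the
# caller omits it; on a recognized OOM the decorator retries at half the size.
@find_executable_batch_size(starting_batch_size=256)
def train(batch_size):
    print(f"Trying batch_size={batch_size}")
    # ... build dataloaders and run the training loop here ...


train()  # note: called without the batch-size argument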
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the token lengths of the train/val examples to each dataset's `len_file`."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
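# ---------------------------------------------------------------------------
# Invocation sketch (paths illustrative): `fire.Fire(save_len_file)` maps the
# function signature onto CLI flags, e.g.
#
#   python save_len_file.py --tokenizer_name facebook/bart-large \
#       --data_dir ./wmt_en_ro --max_source_length 1024 --max_target_length 56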
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
'''simple docstring''' import pytest lowerCamelCase__ = '__dummy_dataset1__' lowerCamelCase__ = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __lowerCAmelCase (): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __lowerCAmelCase (): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Optional[Any] = dataset_loading_script_name _UpperCAmelCase : Any = tmp_path / "datasets" / script_name script_dir.mkdir(parents=__lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = script_dir / F"""{script_name}.py""" with open(__lowerCAmelCase , "w" ) as f: f.write(__lowerCAmelCase ) return str(__lowerCAmelCase )
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
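# ---------------------------------------------------------------------------
# Usage sketch (file name illustrative): `Dataset.from_text` is the public
# entry point backed by this reader; each line of the file becomes one example
# in a single "text" column.
from datasets import Dataset

ds = Dataset.from_text("notes.txt")
print(ds[0]["text"])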
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCamelCase__ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCamelCase__ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCamelCase__ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCamelCase__ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[ "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score", "https://en.wikipedia.org/wiki/METEOR", ] , ) def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] ) ->int: '''simple docstring''' import nltk nltk.download("wordnet" ) if NLTK_VERSION >= version.Version("3.6.5" ): nltk.download("punkt" ) if NLTK_VERSION >= version.Version("3.6.6" ): nltk.download("omw-1.4" ) def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=0.9 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Dict=0.5 ) ->Any: '''simple docstring''' if NLTK_VERSION >= version.Version("3.6.5" ): _UpperCAmelCase : Dict = [ meteor_score.single_meteor_score( word_tokenize(lowerCamelCase__ ) , word_tokenize(lowerCamelCase__ ) , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ ) for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ ) ] else: _UpperCAmelCase : Optional[int] = [ meteor_score.single_meteor_score(lowerCamelCase__ , lowerCamelCase__ , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ ) for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ ) ] return {"meteor": np.mean(lowerCamelCase__ )}
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): # Load configuration defined in the metadata file with open(__lowerCAmelCase ) as metadata_file: _UpperCAmelCase : List[str] = json.load(__lowerCAmelCase ) _UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=__lowerCAmelCase , **metadata["model_config"] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : Tuple = torch.load(__lowerCAmelCase , map_location="cpu" ) # Load the entity vocab file _UpperCAmelCase : List[Any] = load_entity_vocab(__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : Dict = AddedToken("<ent>" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = AddedToken("<ent2>" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase : Any = LukeTokenizer.from_pretrained(__lowerCAmelCase ) # Initialize the embeddings of the special tokens _UpperCAmelCase : List[Any] = state_dict["embeddings.word_embeddings.weight"] _UpperCAmelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) _UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) _UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : int = F"""encoder.layer.{layer_index}.attention.self.""" _UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] _UpperCAmelCase : List[Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : List[Any] = state_dict["entity_embeddings.entity_embeddings.weight"] _UpperCAmelCase : Union[str, Any] = entity_emb[entity_vocab["[MASK]"]] _UpperCAmelCase : Optional[Any] = LukeModel(config=__lowerCAmelCase ).eval() _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) if not (len(__lowerCAmelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F"""Missing keys {', '.join(__lowerCAmelCase )}. 
Expected only missing embeddings.position_ids""" ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" F""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" ) # Check outputs _UpperCAmelCase : Optional[Any] = LukeTokenizer.from_pretrained(__lowerCAmelCase , task="entity_classification" ) _UpperCAmelCase : List[Any] = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." ) _UpperCAmelCase : Tuple = (39, 42) _UpperCAmelCase : str = tokenizer(__lowerCAmelCase , entity_spans=[span] , add_prefix_space=__lowerCAmelCase , return_tensors="pt" ) _UpperCAmelCase : List[str] = model(**__lowerCAmelCase ) # Verify word hidden states if model_size == "large": _UpperCAmelCase : Any = torch.Size((1, 42, 1_024) ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base _UpperCAmelCase : Tuple = torch.Size((1, 42, 768) ) _UpperCAmelCase : Optional[Any] = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _UpperCAmelCase : Tuple = torch.Size((1, 1, 1_024) ) _UpperCAmelCase : Optional[int] = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base _UpperCAmelCase : Any = torch.Size((1, 1, 768) ) _UpperCAmelCase : Optional[Any] = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" F""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(__lowerCAmelCase ) ) model.save_pretrained(__lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Any = {} with open(__lowerCAmelCase , "r" , encoding="utf-8" ) as f: for index, line in enumerate(__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Tuple = line.rstrip().split("\t" ) _UpperCAmelCase : Dict = index return entity_vocab if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' 
) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) lowerCamelCase__ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
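# ---------------------------------------------------------------------------
# Invocation sketch (the local paths and the script filename are illustrative;
# the flags are the ones declared by the argparse parser above):
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke_large/pytorch_model.bin \
#       --metadata_path ./luke_large/metadata.json \
#       --entity_vocab_path ./luke_large/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke_large \
#       --model_size large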
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline lowerCamelCase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str: '''simple docstring''' if isinstance(lowerCamelCase__ , lowerCamelCase__ ): _UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()] return labels def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str: '''simple docstring''' if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0: raise ValueError("You must include at least one label and at least one sequence." ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. " "Make sure the passed template includes formatting syntax such as {{}} where the label should go." ).format(lowerCamelCase__ ) ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): _UpperCAmelCase : Optional[Any] = [sequences] _UpperCAmelCase : int = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(UpperCAmelCase__ ) class lowerCAmelCase__ ( UpperCAmelCase__ ): def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]: '''simple docstring''' _UpperCAmelCase : Tuple = args_parser super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) if self.entailment_id == -1: logger.warning( "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to " "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." ) @property def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]: '''simple docstring''' for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("entail" ): return ind return -1 def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any: '''simple docstring''' _UpperCAmelCase : int = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( "Tokenizer was not supporting padding necessary for zero-shot, attempting to use " " `pad_token=eos_token`" ) _UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token try: _UpperCAmelCase : List[str] = self.tokenizer( lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , ) except Exception as e: if "too short" in str(lowerCamelCase__ ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. 
_UpperCAmelCase : List[Any] = self.tokenizer( lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple: '''simple docstring''' if kwargs.get("multi_class" , lowerCamelCase__ ) is not None: _UpperCAmelCase : int = kwargs["multi_class"] logger.warning( "The `multi_class` argument has been deprecated and renamed to `multi_label`. " "`multi_class` will be removed in a future version of Transformers." ) _UpperCAmelCase : Dict = {} if "candidate_labels" in kwargs: _UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] ) if "hypothesis_template" in kwargs: _UpperCAmelCase : Dict = kwargs["hypothesis_template"] _UpperCAmelCase : List[str] = {} if "multi_label" in kwargs: _UpperCAmelCase : Optional[Any] = kwargs["multi_label"] return preprocess_params, {}, postprocess_params def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]: '''simple docstring''' if len(lowerCamelCase__ ) == 0: pass elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs: _UpperCAmelCase : int = args[0] else: raise ValueError(F"""Unable to understand extra arguments {args}""" ) return super().__call__(lowerCamelCase__ , **lowerCamelCase__ ) def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ): _UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowerCamelCase__ ) - 1, **model_input, } def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int: '''simple docstring''' _UpperCAmelCase : Dict = inputs["candidate_label"] _UpperCAmelCase : Optional[int] = inputs["sequence"] _UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names} _UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = { "candidate_label": candidate_label, "sequence": sequence, "is_last": inputs["is_last"], **outputs, } return model_outputs def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs] _UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs] _UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] ) _UpperCAmelCase : Optional[Any] = logits.shape[0] _UpperCAmelCase : Any = len(lowerCamelCase__ ) _UpperCAmelCase : str = N // n _UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowerCamelCase__ ) == 1: # softmax over the entailment vs. 
contradiction dim for each label independently _UpperCAmelCase : int = self.entailment_id _UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0 _UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]] _UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ ) _UpperCAmelCase : str = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels _UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id] _UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ ) _UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
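# ---------------------------------------------------------------------------
# Usage sketch for the pipeline defined above, via the public factory (the
# model choice is illustrative; any NLI checkpoint whose label2id contains an
# "entailment" label works):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
    hypothesis_template="This example is {}.",
)
print(result["labels"][0], result["scores"][0])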
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
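# ---------------------------------------------------------------------------
# Usage sketch (values illustrative): instantiate a configuration, override
# the visual embedding width, and round-trip it through the serialization
# helpers inherited from PretrainedConfig.
config = VisualBertConfig(visual_embedding_dim=1024)
print(config.model_type, config.hidden_size, config.visual_embedding_dim)
assert config.to_dict()["visual_embedding_dim"] == 1024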
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
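# ---------------------------------------------------------------------------
# An alternative that skips the odd terms entirely (illustrative addition):
# every third Fibonacci number is even, and the even ones satisfy
# E(k) = 4 * E(k-1) + E(k-2) with E(1) = 2, E(2) = 8.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total


print(f"{solution_even_recurrence() = }")  # 4613732, matching solution()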
"""Callbacks and widgets for displaying `Trainer` progress inside Jupyter notebooks."""

import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    """Format `t` (in seconds) as (h):mm:ss."""
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    """Build the HTML for a progress bar at `value` out of `total` steps."""
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    """Render `items` (a list of rows, the first being the header) as an HTML table."""
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    """
    A progress bar displayed in a Jupyter notebook.

    After `warmup` unconditional refreshes, the bar only re-renders roughly every
    `update_every` seconds, to avoid flooding the notebook output.
    """

    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: Optional[str] = None):
        """Update the bar to `value`, re-rendering only when enough progress or time has passed."""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize the timing state and render unconditionally.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        """Rebuild the label shown next to the bar and trigger a display refresh."""
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1 / self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        """Clear the displayed bar."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    """
    A `NotebookProgressBar` that also displays a table of metrics and, optionally,
    a child bar (used for evaluation during a training run).
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Append a row of `values` (a dict mapping column name to value) to the metrics table."""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Add a child progress bar displayed under the metrics table."""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Close the child progress bar."""
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    """A `TrainerCallback` that displays training progress and logged metrics in a Jupyter notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only write a line when there is no evaluation to report it.
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
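# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the module above).
# Inside `Trainer`, this callback is picked up automatically when a Jupyter
# environment is detected, but a `NotebookProgressBar` can also be driven by
# hand, as below. Assumptions: the code runs inside a notebook (rendering
# goes through IPython.display) and the module is importable as
# `transformers.utils.notebook`; the sleep stands in for real work.

import time

from transformers.utils.notebook import NotebookProgressBar

bar = NotebookProgressBar(100, prefix="Demo")
for step in range(1, 101):
    time.sleep(0.02)  # stand-in for real work
    bar.update(step, comment=f"step {step}")
bar.update(100, force_update=True)  # force a final refresh of the label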
"""Tests for the Flax ALBERT models."""

import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_model_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
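# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the test suite above).
# The "albert-base-v2" checkpoint exercised by the slow tests can also be
# queried directly; this assumes a working Flax install and network access to
# download the weights, and the example sentence is arbitrary.

from transformers import AlbertTokenizerFast, FlaxAlbertModel

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")

inputs = tokenizer("Flax ALBERT produces one vector per token.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768) for the base model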