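# The dump is truncated above this point; the import and transform lists
# below are reconstructed assumptions, not the file's verbatim contents.
from torchvision import datasets, transforms

# Assumed minimal transforms (the originals likely add augmentation and
# normalization); MNIST is padded from 28x28 to 32x32 to match the
# 'size': 32 entry in DATASET_CONFIGS below.
_MNIST_TRAIN_TRANSFORMS = _MNIST_TEST_TRANSFORMS = [
    transforms.Pad(2),
    transforms.ToTensor(),
]
_CIFAR_TRAIN_TRANSFORMS = _CIFAR_TEST_TRANSFORMS = [
    transforms.ToTensor(),
]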
TRAIN_DATASETS = {
    'mnist': datasets.MNIST(
        './datasets/mnist', train=True, download=True,
        transform=transforms.Compose(_MNIST_TRAIN_TRANSFORMS)
    ),
    'cifar10': datasets.CIFAR10(
        './datasets/cifar10', train=True, download=True,
        transform=transforms.Compose(_CIFAR_TRAIN_TRANSFORMS)
    ),
    'cifar100': datasets.CIFAR100(
        './datasets/cifar100', train=True, download=True,
        transform=transforms.Compose(_CIFAR_TRAIN_TRANSFORMS)
    ),
}
TEST_DATASETS = {
    'mnist': datasets.MNIST(
        './datasets/mnist', train=False,
        transform=transforms.Compose(_MNIST_TEST_TRANSFORMS)
    ),
    'cifar10': datasets.CIFAR10(
        './datasets/cifar10', train=False,
        transform=transforms.Compose(_CIFAR_TEST_TRANSFORMS)
    ),
    'cifar100': datasets.CIFAR100(
        './datasets/cifar100', train=False,
        transform=transforms.Compose(_CIFAR_TEST_TRANSFORMS)
    ),
}
DATASET_CONFIGS = {
    'mnist': {'size': 32, 'channels': 1, 'classes': 10},
    'cifar10': {'size': 32, 'channels': 3, 'classes': 10},
    'cifar100': {'size': 32, 'channels': 3, 'classes': 100},
}
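# Minimal usage sketch (an assumption, not part of the original file): the
# three dicts share keys, so training code can resolve a loader and its shape
# metadata from a single dataset-name string.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    name = 'cifar10'
    config = DATASET_CONFIGS[name]  # {'size': 32, 'channels': 3, 'classes': 10}
    loader = DataLoader(TRAIN_DATASETS[name], batch_size=128, shuffle=True)
    images, labels = next(iter(loader))
    assert images.shape[1:] == (config['channels'], config['size'], config['size'])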
# <FILESEP>
import os
import math
import pickle
from datetime import datetime

import torch
from torch.nn.functional import cross_entropy
from tqdm import tqdm

from config import ModelConfig
from utils import load_model_and_tokenizer, complete_input, extract_model_embedding
class Attacker:
    def __init__(self, model_name, init_input, target, device='cuda:0',
                 steps=768, topk=256, batch_size=1024, mini_batch_size=16,
                 **kwargs):
        try:
            self.model_config = getattr(ModelConfig, model_name)[0]
        except AttributeError:
            raise NotImplementedError(f'unsupported model: {model_name}')

        self.model_name = model_name
        self.init_input = init_input
        self.target = target
        self.device = device
        self.steps = steps
        self.topk = topk
        self.batch_size = batch_size
        self.mini_batch_size = mini_batch_size
        # Candidate batches are evaluated in mini-batches to bound GPU memory.
        self.mini_batches = math.ceil(self.batch_size / self.mini_batch_size)
        self.kwargs = kwargs

        self.model, self.tokenizer = load_model_and_tokenizer(
            self.model_config['path'], self.device, False
        )

        # Working ("temp") state for the current optimization step.
        self.temp_step = 0
        self.temp_input = self.init_input
        self.temp_output = ''
        self.temp_loss = 1e+9
        self.temp_grad = None
        self.temp_input_ids = None
        self.temp_sample_list = []
        self.temp_sample_ids = None
        self.input_slice = None
        self.target_slice = None

        # Full per-step history.
        self.input_list = []
        self.output_list = []
        self.loss_list = []

        # Best-so-far ("route") state, tracked separately from the history.
        self.route_input = self.init_input
        self.route_loss = 1e+9
        self.route_step_list = []
        self.route_input_list = []
        self.route_output_list = []
        self.route_loss_list = []
    def test(self):
        """Generate from the current adversarial input and record the output."""
        self.model.eval()
        input_str = complete_input(self.model_config, self.temp_input)
        input_ids = self.tokenizer(
            input_str, truncation=True, return_tensors='pt'
        ).input_ids.to(self.device)
        generate_ids = self.model.generate(input_ids, max_new_tokens=96)
        # Assumed continuation (the dump is truncated here): decode only the
        # newly generated tokens into self.temp_output.
        self.temp_output = self.tokenizer.decode(
            generate_ids[0, input_ids.shape[1]:], skip_special_tokens=True
        )
        return self.temp_output
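# Minimal usage sketch (assumed; the entry-point script is not part of this
# dump, and the model name below is hypothetical, standing in for an entry
# defined on ModelConfig):
#
#     attacker = Attacker(
#         model_name='vicuna',
#         init_input='! ! ! ! ! ! ! ! ! !',
#         target='Sure, here is how to',
#         device='cuda:0',
#     )
#     print(attacker.test())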