text stringlengths 1 93.6k |
|---|
optimizer,
|
device,
|
epochs,
|
early_stopping=True,
|
early_num=20):
|
loss_list = [100]
|
early_epoch = 0
|
net = net.to(device)
|
print("training on ", device)
|
start = time.time()
|
train_loss_list = []
|
valida_loss_list = []
|
train_acc_list = []
|
valida_acc_list = []
|
for epoch in range(epochs):
|
train_acc_sum, n = 0.0, 0
|
time_epoch = time.time()
|
lr_adjust = torch.optim.lr_scheduler.CosineAnnealingLR(
|
optimizer, 15, eta_min=0.0, last_epoch=-1)
|
for X, y in train_iter:
|
batch_count, train_l_sum = 0, 0
|
#X = X.permute(0, 3, 1, 2)
|
X = X.to(device)
|
y = y.to(device)
|
y_hat = net(X)
|
# print('y_hat', y_hat)
|
# print('y', y)
|
l = loss(y_hat, y.long())
|
optimizer.zero_grad()
|
l.backward()
|
optimizer.step()
|
train_l_sum += l.cpu().item()
|
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
|
n += y.shape[0]
|
batch_count += 1
|
lr_adjust.step()
|
valida_acc, valida_loss = record.evaluate_accuracy(
|
valida_iter, net, loss, device)
|
loss_list.append(valida_loss)
|
train_loss_list.append(train_l_sum) # / batch_count)
|
train_acc_list.append(train_acc_sum / n)
|
valida_loss_list.append(valida_loss)
|
valida_acc_list.append(valida_acc)
|
print(
|
'epoch %d, train loss %.6f, train acc %.3f, valida loss %.6f, valida acc %.3f, time %.1f sec'
|
% (epoch + 1, train_l_sum / batch_count, train_acc_sum / n,
|
valida_loss, valida_acc, time.time() - time_epoch))
|
PATH = "./net_DBA.pt"
|
# if loss_list[-1] <= 0.01 and valida_acc >= 0.95:
|
# torch.save(net.state_dict(), PATH)
|
# break
|
if early_stopping and loss_list[-2] < loss_list[-1]:
|
if early_epoch == 0: # and valida_acc > 0.9:
|
torch.save(net.state_dict(), PATH)
|
early_epoch += 1
|
loss_list[-1] = loss_list[-2]
|
if early_epoch == early_num:
|
net.load_state_dict(torch.load(PATH))
|
break
|
else:
|
early_epoch = 0
|
print('epoch %d, loss %.4f, train acc %.3f, time %.1f sec'
|
% (epoch + 1, train_l_sum / batch_count, train_acc_sum / n,
|
time.time() - start))
|
def sampling(proportion, ground_truth):
    """Split per-class sample indices into train and test index lists.

    For every class label 1..max(ground_truth) (label 0 is treated as
    background and ignored), the flat positions of that class are shuffled,
    then the first ``nb_val`` go to train and the rest to test, where
    ``nb_val = max(int((1 - proportion) * class_size), 3)`` — or 0 when
    ``proportion == 1`` (everything goes to test).

    Args:
        proportion: fraction controlling the split; 1 means no train samples.
        ground_truth: label array (assumes a 1-D integer label vector with
            classes numbered from 1 — TODO confirm against callers).

    Returns:
        (train_indexes, test_indexes): two shuffled lists of flat indices.
    """
    train = {}
    test = {}
    labels_loc = {}
    num_classes = max(ground_truth)
    # Flatten once; the original iterated this per class with identical result.
    flat_labels = ground_truth.ravel().tolist()
    for cls in range(num_classes):
        cls_positions = [pos for pos, lab in enumerate(flat_labels) if lab == cls + 1]
        np.random.shuffle(cls_positions)
        labels_loc[cls] = cls_positions
        # proportion == 1 -> empty train set; otherwise at least 3 train samples.
        if proportion == 1:
            nb_val = 0
        else:
            nb_val = max(int((1 - proportion) * len(cls_positions)), 3)
        train[cls] = cls_positions[:nb_val]
        test[cls] = cls_positions[nb_val:]
    train_indexes = []
    test_indexes = []
    for cls in range(num_classes):
        train_indexes.extend(train[cls])
        test_indexes.extend(test[cls])
    np.random.shuffle(train_indexes)
    np.random.shuffle(test_indexes)
    return train_indexes, test_indexes
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.