text
stringlengths
1
93.6k
if eval_tasks:
run_eval_tasks()
return
else:
# Else, just train
while current_step < params["train_steps"]:
# Else, don't stop and restart
estimator.train(input_fn=partial(input_fn, global_step=current_step, eval=False), max_steps=params["train_steps"])
# Script entry point: force TF1-style graph execution (this file targets the
# tf.estimator API), parse CLI arguments, and hand off to main().
if __name__ == "__main__":
    # NOTE(review): presumably `tf` is tensorflow.compat.v1 (or TF1.x) — the
    # import is outside this chunk; confirm before relying on it.
    tf.disable_v2_behavior()
    args = parse_args()
    main(args)
# <FILESEP>
import torch
from sklearn.metrics.pairwise import cosine_similarity
import scipy.sparse as sp
import numpy as np
import logging
def get_logger(filename, verbosity=1, name=None):
    """Create (or fetch) a logger that writes to both *filename* and stderr.

    Args:
        filename: path of the log file (opened in "w" mode, truncating it).
        verbosity: 0 = DEBUG, 1 = INFO, 2 = WARNING. Raises KeyError otherwise.
        name: logger name passed to logging.getLogger; None means root logger.

    Returns:
        The configured logging.Logger instance.
    """
    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(level_dict[verbosity])
    # Fix: logging.getLogger returns a cached instance, so the original code
    # stacked a new pair of handlers on every call, duplicating each log line.
    # Only attach handlers the first time this logger is configured.
    if not logger.handlers:
        fh = logging.FileHandler(filename, "w")
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        logger.addHandler(sh)
    return logger
def adj_norm(adj, neighbor_only=False):
    """Symmetrically normalize an adjacency matrix: D^-1/2 (A [+ I]) D^-1/2.

    Args:
        adj: square adjacency matrix (dense or sparse COO torch tensor).
        neighbor_only: when False (default), self-loops (identity) are added
            before normalization.

    Returns:
        The normalized adjacency (same layout as produced by sparse_dense_mul).
    """
    if not neighbor_only:
        # Fix: the identity was hard-coded to .cuda(), which crashed for CPU
        # tensors; create it on whatever device `adj` lives on instead.
        adj = torch.add(torch.eye(adj.shape[0], device=adj.device), adj)
    if adj.is_sparse:
        # Degree must be computed densely; sparse .sum(dim=1) yields a sparse
        # result unsuitable for the broadcasting below.
        degree = adj.to_dense().sum(dim=1)
    else:
        degree = adj.sum(dim=1)
    # d^-1/2 broadcast over columns (in) and rows (out); isolated nodes have
    # degree 0 -> inf after pow, which is replaced by 0.
    in_degree_norm = torch.pow(degree.view(1, -1), -0.5).expand(adj.shape[0], adj.shape[0])
    in_degree_norm = torch.where(torch.isinf(in_degree_norm), torch.full_like(in_degree_norm, 0), in_degree_norm)
    out_degree_norm = torch.pow(degree.view(-1, 1), -0.5).expand(adj.shape[0], adj.shape[0])
    out_degree_norm = torch.where(torch.isinf(out_degree_norm), torch.full_like(out_degree_norm, 0), out_degree_norm)
    adj = sparse_dense_mul(adj, in_degree_norm)
    adj = sparse_dense_mul(adj, out_degree_norm)
    return adj
def sparse_dense_mul(s, d):
    """Element-wise multiply `s` by dense matrix `d`.

    If `s` is dense this is a plain element-wise product. If `s` is a sparse
    COO tensor, only its stored entries are multiplied (result stays sparse).

    Args:
        s: dense or sparse COO 2-D tensor.
        d: dense tensor of the same shape.

    Returns:
        Dense tensor when `s` is dense, sparse COO tensor otherwise.
    """
    if not s.is_sparse:
        return s * d
    i = s._indices()
    v = s._values()
    # Gather the dense values at the sparse tensor's stored coordinates.
    dv = d[i[0, :], i[1, :]]
    # Fix: torch.sparse.FloatTensor is the deprecated legacy constructor and
    # forces float32; sparse_coo_tensor preserves the inputs' dtype and device.
    return torch.sparse_coo_tensor(i, v * dv, s.size())
def evaluate(model, adj, features, labels, mask):
    """Return classification accuracy of `model` on the nodes selected by `mask`.

    Args:
        model: callable taking (adj, features) and returning per-node logits.
        adj, features: inputs forwarded to the model.
        labels: ground-truth class indices, one per node.
        mask: boolean/index mask selecting the nodes to score.

    Returns:
        Fraction of masked nodes whose argmax prediction matches the label.
    """
    model.eval()
    with torch.no_grad():
        predictions = model(adj, features)[mask].argmax(dim=1)
        targets = labels[mask]
        num_correct = (predictions == targets).sum().item()
    return num_correct * 1.0 / targets.shape[0]
def get_reliable_neighbors(adj, features, k, degree_threshold):
    """Densify `adj` IN PLACE: connect every node to its k most feature-similar
    nodes, restricted to "reliable" (high-degree) candidates.

    Args:
        adj: dense square adjacency matrix, mutated in place.
        features: per-node feature matrix used for cosine similarity.
        k: number of neighbors to wire up per node.
        degree_threshold: only nodes with degree strictly above this value are
            eligible to be chosen as neighbors.

    Raises:
        ValueError: if fewer than `k` nodes pass the degree threshold.

    Returns:
        None (the adjacency is modified in place).
    """
    degree = adj.sum(dim=1)
    degree_mask = degree > degree_threshold
    # Was `assert`, which disappears under `python -O`; raise explicitly.
    if degree_mask.sum().item() < k:
        raise ValueError("fewer than k nodes exceed degree_threshold")
    # Cosine similarity computed natively in torch (row-normalize + inner
    # product) on the tensors' own device — replaces the sklearn CPU
    # round-trip and the hard-coded 'cuda' transfer of the original.
    normed = torch.nn.functional.normalize(features.float(), p=2, dim=1)
    sim = (normed @ normed.t()).to(adj.device)
    # Zero out columns of unreliable nodes so topk never selects them.
    sim[:, ~degree_mask] = 0
    _, top_k_indices = sim.topk(k=k, dim=1)
    for i in range(adj.shape[0]):
        adj[i][top_k_indices[i]] = 1
        adj[i][i] = 0  # never keep a self-loop
    return
def adj_new_norm(adj, alpha):
    # Normalize the adjacency with self-loops using exponent `alpha`:
    # D^alpha (A + I) D^alpha. With alpha = -0.5 this matches the symmetric
    # normalization of adj_norm, except that infinities from zero-degree
    # nodes are NOT zeroed out here.
    # NOTE(review): torch.eye(...).cuda() assumes a CUDA device is available;
    # this will fail on CPU-only setups — confirm intended deployment.
    adj = torch.add(torch.eye(adj.shape[0]).cuda(), adj)
    degree = adj.sum(dim=1)
    # Column- and row-wise degree factors broadcast to the full matrix.
    in_degree_norm = torch.pow(degree.view(1, -1), alpha).expand(adj.shape[0], adj.shape[0])
    out_degree_norm = torch.pow(degree.view(-1, 1), alpha).expand(adj.shape[0], adj.shape[0])
    adj = sparse_dense_mul(adj, in_degree_norm)
    adj = sparse_dense_mul(adj, out_degree_norm)
    # NOTE(review): no `return` statement is visible in this chunk — the
    # function appears truncated here (likely `return adj` follows); confirm
    # against the full file before relying on its return value.