# Assumes train_df, annotations (training GO-term sets), go_rels (an
# Ontology object), NAMESPACES, FUNC_DICT, ont and the two input paths are
# defined earlier in the surrounding script.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the test set and turn its propagated annotations into sets.
test_df = pd.read_pickle(test_data_file)
test_annotations = test_df['prop_annotations'].values
test_annotations = list(map(lambda x: set(x), test_annotations))

# Estimate information content from the union of train and test annotations.
go_rels.calculate_ic(annotations + test_annotations)

# Map each training protein ID to its row index so Diamond hits can be
# resolved back to that protein's annotation set.
prot_index = {}
for i, row in enumerate(train_df.itertuples()):
    prot_index[row.proteins] = i
# BLAST similarity (Diamond): parse the three-column tabular output
# (query ID, hit ID, similarity score) into a nested dict keyed by
# query protein, then by hit protein.
diamond_scores = {}
with open(diamond_scores_file) as f:
    for line in f:
        it = line.strip().split()
        if it[0] not in diamond_scores:
            diamond_scores[it[0]] = {}
        diamond_scores[it[0]][it[1]] = float(it[2])
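# For reference, a file in this three-column format can be produced with
# Diamond's tabular output mode (a sketch; the database and file names
# below are placeholders, not taken from this script):
#
#   diamond blastp -d train_proteins.dmnd -q test_proteins.fa \
#       --outfmt 6 qseqid sseqid bitscore -o diamond_scores.txt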
# BlastKNN: transfer GO terms from a test protein's Diamond hits, scoring
# each term by the similarity-weighted fraction of hits annotated with it.
blast_preds = []
for i, row in enumerate(test_df.itertuples()):
    annots = {}
    prot_id = row.proteins
    if prot_id in diamond_scores:
        sim_prots = diamond_scores[prot_id]
        allgos = set()
        total_score = 0.0
        # Collect all GO terms of the hits and the total similarity mass.
        for p_id, score in sim_prots.items():
            allgos |= annotations[prot_index[p_id]]
            total_score += score
        allgos = list(sorted(allgos))
        sim = np.zeros(len(allgos), dtype=np.float32)
        # Score each candidate term by the summed scores of the hits that
        # carry it, normalized by the total score.
        for j, go_id in enumerate(allgos):
            s = 0.0
            for p_id, score in sim_prots.items():
                if go_id in annotations[prot_index[p_id]]:
                    s += score
            sim[j] = s / total_score
        for go_id, score in zip(allgos, sim):
            annots[go_id] = score
    blast_preds.append(annots)
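# Optional sanity check: every transferred score is a fraction of
# total_score contributed by hits carrying the term, so it lies in (0, 1].
for annots in blast_preds:
    for score in annots.values():
        assert 0.0 < score <= 1.0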
# Restrict evaluation to the terms of the target sub-ontology, excluding
# its root term, and filter the ground-truth labels accordingly.
go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
go_set.remove(FUNC_DICT[ont])

labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), test_annotations))
print(len(go_set))
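# evaluate_annotations is not defined in this snippet. Below is a minimal
# sketch of CAFA-style protein-centric metrics consistent with how it is
# called in the threshold sweep (it assumes the ontology object exposes
# get_ic for information content); an illustration, not necessarily the
# original implementation.
import math

def evaluate_annotations_sketch(go, real_annots, pred_annots):
    total = len(real_annots)
    p_sum, r_sum, p_total = 0.0, 0.0, 0
    ru, mi = 0.0, 0.0
    for real, pred in zip(real_annots, pred_annots):
        tp = real & pred
        fp = pred - tp
        fn = real - tp
        # Remaining uncertainty (missed terms) and misinformation (wrongly
        # predicted terms), weighted by information content.
        ru += sum(go.get_ic(go_id) for go_id in fn)
        mi += sum(go.get_ic(go_id) for go_id in fp)
        if len(real) > 0:
            r_sum += len(tp) / len(real)
        if len(pred) > 0:
            p_total += 1
            p_sum += len(tp) / len(pred)
    r = r_sum / total if total > 0 else 0.0
    p = p_sum / p_total if p_total > 0 else 0.0
    f = 2 * p * r / (p + r) if p + r > 0 else 0.0
    s = math.sqrt((ru / total) ** 2 + (mi / total) ** 2) if total > 0 else 0.0
    return f, p, r, s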
# Sweep prediction thresholds to find Fmax (protein-centric F-measure)
# and Smin (minimum semantic distance).
fmax = 0.0
tmax = 0.0
smin = 1000.0
precisions = []
recalls = []
for t in range(101):
    threshold = t / 100.0
    preds = []
    for i, row in enumerate(test_df.itertuples()):
        annots = set()
        for go_id, score in blast_preds[i].items():
            if score >= threshold:
                annots.add(go_id)
        # Propagate predictions up the ontology so the parents of every
        # predicted term are also predicted ("get_anchestors" is the
        # ontology helper's own spelling).
        new_annots = set()
        for go_id in annots:
            new_annots |= go_rels.get_anchestors(go_id)
        preds.append(new_annots)
    # Filter predictions to the evaluated sub-ontology.
    preds = list(map(lambda x: set(filter(lambda y: y in go_set, x)), preds))
    fscore, prec, rec, s = evaluate_annotations(go_rels, labels, preds)
    precisions.append(prec)
    recalls.append(rec)
    print(f'Fscore: {fscore}, S: {s}, threshold: {threshold}')
    if fmax < fscore:
        fmax = fscore
        tmax = threshold
    if smin > s:
        smin = s
print(f'Fmax: {fmax:0.3f}, Smin: {smin:0.3f}, threshold: {tmax}')
# Build the precision-recall curve: sort the points by recall, then
# integrate with the trapezoidal rule to obtain AUPR.
precisions = np.array(precisions)
recalls = np.array(recalls)
sorted_index = np.argsort(recalls)
recalls = recalls[sorted_index]
precisions = precisions[sorted_index]
aupr = np.trapz(precisions, recalls)
print(f'AUPR: {aupr:0.3f}')
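# Optional cross-check (a sketch; requires scikit-learn, which this script
# does not otherwise import): sklearn.metrics.auc integrates the same
# recall-sorted points and should agree with np.trapz above. Note that
# np.trapz is deprecated since NumPy 2.0 in favor of np.trapezoid.
from sklearn.metrics import auc
assert np.isclose(aupr, auc(recalls, precisions))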
# Plot the precision-recall curve.
plt.figure()
lw = 2
plt.plot(recalls, precisions, color='darkorange',
         lw=lw, label=f'AUPR curve (area = {aupr:0.3f})')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Area Under the Precision-Recall curve')
plt.legend(loc="lower right")
plt.show()