text
stringlengths 1
93.6k
|
|---|
plt.savefig('aupr.pdf')
|
plt.show()
|
def compute_roc(labels, preds):
    """Return the area under the ROC curve for flattened labels/predictions.

    Both inputs are array-likes with a ``flatten()`` method (e.g. numpy
    arrays); they are flattened so multi-class/multi-label matrices are
    scored as one binary problem.
    """
    flat_labels = labels.flatten()
    flat_preds = preds.flatten()
    false_pos, true_pos, _thresholds = roc_curve(flat_labels, flat_preds)
    return auc(false_pos, true_pos)
|
def compute_mcc(labels, preds):
    """Return the Matthews correlation coefficient of flattened labels/preds."""
    return matthews_corrcoef(labels.flatten(), preds.flatten())
|
def evaluate_annotations(go, real_annots, pred_annots):
    """Compute CAFA-style evaluation metrics for GO-term predictions.

    Args:
        go: ontology object exposing ``get_ic(go_id)`` (information content).
        real_annots: list of sets of true GO term ids, one set per protein.
        pred_annots: list of sets of predicted GO term ids, parallel to
            ``real_annots``.

    Returns:
        Tuple ``(f, p, r, s)``:
        ``f`` — F-measure of the averaged precision/recall;
        ``p`` — precision averaged over proteins with >= 1 prediction;
        ``r`` — recall averaged over proteins with >= 1 true annotation;
        ``s`` — semantic distance ``sqrt(ru^2 + mi^2)`` where ``ru`` is
        remaining uncertainty (IC of false negatives) and ``mi`` is
        misinformation (IC of false positives).
    """
    total = 0      # proteins with at least one true annotation
    p_total = 0    # proteins with at least one prediction
    p = 0.0        # summed per-protein precision
    r = 0.0        # summed per-protein recall
    ru = 0.0       # summed IC of false negatives (remaining uncertainty)
    mi = 0.0       # summed IC of false positives (misinformation)
    for real, pred in zip(real_annots, pred_annots):
        # Proteins without any true annotation are skipped entirely.
        if not real:
            continue
        tp = real & pred
        fp = pred - tp
        fn = real - tp
        for go_id in fp:
            mi += go.get_ic(go_id)
        for go_id in fn:
            ru += go.get_ic(go_id)
        total += 1
        # len(tp) + len(fn) == len(real) >= 1, so this never divides by zero.
        r += len(tp) / (len(tp) + len(fn))
        if pred:
            p_total += 1
            # len(tp) + len(fp) == len(pred) >= 1 here.
            p += len(tp) / (len(tp) + len(fp))
    # Guard: if every real annotation set was empty, total == 0 and the
    # original code raised ZeroDivisionError; return all-zero metrics instead.
    if total > 0:
        ru /= total
        mi /= total
        r /= total
    if p_total > 0:
        p /= p_total
    f = 0.0
    if p + r > 0:
        f = 2 * p * r / (p + r)
    s = math.sqrt(ru * ru + mi * mi)
    return f, p, r, s
|
if __name__ == '__main__':
    # Script entry point: run main() only when executed directly, not on import.
    main()
|
# <FILESEP>
|
# coding=utf-8
|
# Copyright 2019 The Google Research Authors.
|
#
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
# you may not use this file except in compliance with the License.
|
# You may obtain a copy of the License at
|
#
|
# http://www.apache.org/licenses/LICENSE-2.0
|
#
|
# Unless required by applicable law or agreed to in writing, software
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# See the License for the specific language governing permissions and
|
# limitations under the License.
|
import torch
|
import torch.nn as nn
|
class ConditionalInstanceNorm2dPlus(nn.Module):
|
def __init__(self, num_features, num_classes, bias=True):
|
super().__init__()
|
self.num_features = num_features
|
self.bias = bias
|
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
|
if bias:
|
self.embed = nn.Embedding(num_classes, num_features * 3)
|
self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
|
self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0
|
else:
|
self.embed = nn.Embedding(num_classes, 2 * num_features)
|
self.embed.weight.data.normal_(1, 0.02)
|
def forward(self, x, y):
|
means = torch.mean(x, dim=(2, 3))
|
m = torch.mean(means, dim=-1, keepdim=True)
|
v = torch.var(means, dim=-1, keepdim=True)
|
means = (means - m) / (torch.sqrt(v + 1e-5))
|
h = self.instance_norm(x)
|
if self.bias:
|
gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.