text stringlengths 1 93.6k |
|---|
"""
|
# -----------------------------------------------------------------------------
|
def calculate_recommendation(
    tags,
    time_delta=3,  # how recent papers are we recommending? in days
):
    """Rank recent, not-yet-tagged papers for every tag using a linear SVM.

    For each tag, trains an SVM ranker with the tag's papers as positives
    and everything else as negatives, scores the whole corpus with the
    decision function, then filters the ranking down to papers that are
    recent enough and not already tagged by the user.

    Parameters
    ----------
    tags : dict
        Maps tag name -> iterable of paper ids that carry that tag
        (the positive set for that tag). Tags with no papers are skipped.
    time_delta : int, optional
        Maximum age (in days) of papers eligible for recommendation.

    Returns
    -------
    (all_pids, all_scores) : tuple of dicts keyed by tag
        all_pids[tag] is the ranked list of recommended paper ids and
        all_scores[tag] the matching decision scores (scaled by 100),
        best first.

    NOTE(review): relies on module-level `features`, `metas`, `tnow`,
    `np` and `svm` — presumably set up earlier in this file; confirm.
    """
    # a bit of preprocessing: feature matrix and pid <-> row-index maps
    x, corpus_pids = features['x'], features['pids']
    n = x.shape[0]
    ptoi = {p: i for i, p in enumerate(corpus_pids)}
    itop = {i: p for i, p in enumerate(corpus_pids)}

    # loop-invariant values, hoisted out of the per-tag loop:
    # allowed time delta in seconds
    deltat = time_delta * 60 * 60 * 24
    # papers the user already tagged; never recommend these again
    have = set().union(*tags.values())

    # loop over all the tags
    all_pids, all_scores = {}, {}
    for tag, tag_pids in tags.items():
        if len(tag_pids) == 0:
            continue  # no positives to learn from for this tag

        # construct the positive set (binary labels) for this tag
        y = np.zeros(n, dtype=np.float32)
        for pid in tag_pids:
            y[ptoi[pid]] = 1.0

        # classify; class_weight='balanced' compensates for the tiny
        # positive set relative to the full corpus
        clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000, tol=1e-6, C=0.01)
        clf.fit(x, y)
        s = clf.decision_function(x)

        # rank the whole corpus by decision score, best first
        sortix = np.argsort(-s)
        ranked_pids = [itop[ix] for ix in sortix]
        scores = [100 * float(s[ix]) for ix in sortix]

        # filter by time to only recent papers
        keep = [i for i, pid in enumerate(ranked_pids) if (tnow - metas[pid]['_time']) < deltat]
        ranked_pids, scores = [ranked_pids[i] for i in keep], [scores[i] for i in keep]

        # finally exclude the papers we already have tagged
        keep = [i for i, pid in enumerate(ranked_pids) if pid not in have]
        ranked_pids, scores = [ranked_pids[i] for i in keep], [scores[i] for i in keep]

        # store results
        all_pids[tag] = ranked_pids
        all_scores[tag] = scores

    return all_pids, all_scores
|
# -----------------------------------------------------------------------------
|
def render_recommendations(user, tags, tag_pids, tag_scores):
|
# render the paper recommendations into the html template
|
# first we are going to merge all of the papers / scores together using a MAX
|
max_score = {}
|
max_source_tag = {}
|
for tag in tag_pids:
|
for pid, score in zip(tag_pids[tag], tag_scores[tag]):
|
max_score[pid] = max(max_score.get(pid, -99999), score) # lol
|
if max_score[pid] == score:
|
max_source_tag[pid] = tag
|
# now we have a dict of pid -> max score. sort by score
|
max_score_list = sorted(max_score.items(), key=lambda x: x[1], reverse=True)
|
pids, scores = zip(*max_score_list)
|
# now render the html for each individual recommendation
|
parts = []
|
n = min(len(scores), args.num_recommendations)
|
for score, pid in zip(scores[:n], pids[:n]):
|
p = pdb[pid]
|
authors = ', '.join(a['name'] for a in p['authors'])
|
# crop the abstract
|
summary = p['summary']
|
summary = summary[:min(500, len(summary))]
|
if len(summary) == 500:
|
summary += '...'
|
# create the url that will feature this paper on top and also show the most similar papers
|
url = 'https://arxiv-sanity-lite.com/?rank=pid&pid=' + pid
|
parts.append(
|
"""
|
<tr>
|
<td valign="top"><div class="s">%.2f</div></td>
|
<td>
|
<a href="%s">%s</a> <div class="f">(%s)</div>
|
<div class="a">%s</div>
|
<div class="u">%s</div>
|
</td>
|
</tr>
|
""" % (score, url, p['title'], max_source_tag[pid], authors, summary)
|
)
|
# render the final html
|
out = template
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.