repo_name | path | text |
|---|---|---|
medtray/MultiEm-RGCN | nordlys/nordlys/core/data/dbpedia/indexer_dbpedia_uri.py | <filename>nordlys/nordlys/core/data/dbpedia/indexer_dbpedia_uri.py
"""
DBpedia URI Indexer
===================
Generates URI-only DBpedia index.
:Author: <NAME>
"""
import argparse
import json
from collections import defaultdict
from nordlys.config import MONGO_COLLECTION_DBPEDIA, MONGO_HOST, MONGO_DB
from nordlys.core.data.dbpedia.indexer_dbpedia import IndexerDBpedia
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.indexer_mongo import IndexerMongo
from nordlys.core.storage.mongo import Mongo
from nordlys.core.utils.file_utils import FileUtils
from nordlys.config import PLOGGER
class IndexerDBpediaURI(IndexerDBpedia):
def __init__(self, config, field_counts, collection=MONGO_COLLECTION_DBPEDIA):
super(IndexerDBpediaURI, self).__init__(config, collection)
self.__n = config.get("top_n_fields", 500)
self.__field_counts = field_counts
self.__top_fields = None
def get_top_fields(self):
"""Gets top-n frequent fields from DBpedia
NOTE: Rank of fields with the same frequency is equal.
This means that there can more than one field for each rank.
"""
PLOGGER.info("Getting the top-n frequent DBpedia fields ...")
sorted_fields = sorted(self.__field_counts.items(), key=lambda item: item[1], reverse=True)
PLOGGER.info("Number of total fields: " + str(len(sorted_fields)))
top_fields = []
rank, prev_count, i = 0, 0, 0
for field, count in sorted_fields:
if field in self._config["blacklist"]:
continue
# changes the rank if the count number is changed
i += 1
if prev_count != count:
rank = i
prev_count = count
if rank > self.__n:
break
top_fields.append(field)
self.__top_fields = top_fields
def get_mappings(self):
"""Sets the mappings"""
mappings = {Elastic.FIELD_CATCHALL: Elastic.notanalyzed_searchable_field()}
for field in self._fsdm_fields:
mappings[field] = Elastic.notanalyzed_searchable_field()
self.get_top_fields()
for field in self.__top_fields:
mappings[field] = Elastic.notanalyzed_searchable_field()
return mappings
def __get_field_value(self, value, f=None):
"""Converts mongoDB field value to indexable values by resolving URIs."""
nval = [] # holds resolved values
for v in value:
if v.startswith("<dbpedia:"):
nval.append(v)
return nval
def get_doc_content(self, doc):
"""create the index content for a given mongo document
Here we keep both FSDM fields and individual fields for each document.
:param doc: a Mongo document
:return: a document ready for indexing
"""
# Ignores document if the ID does not start with "<dbpedia:" (just to speed up)
doc_id = Mongo.unescape(doc[Mongo.ID_FIELD])
if not doc_id.startswith("<dbpedia:"):
return None
# Ignores document if it does not have must have fields
for f in self._config["must_have"]:
if f not in doc:
return None
self._doc_content = defaultdict(list)
for f in doc:
# Adds content for FSDM fields
if f.lower() in self._config["names"]:
self._doc_content["names"] += self.__get_field_value(doc[f])
elif f in self._config["categories"]:
self._doc_content["categories"] += self.__get_field_value(doc[f])
elif f in self._config["similar_entity_names"]:
self._doc_content["similar_entity_names"] += self.__get_field_value(doc[f])
elif f not in self._config["blacklist"]:
if doc[f][0].startswith("<dbpedia:"):
self._doc_content["related_entity_names"] += self.__get_field_value(doc[f], f)
else:
self._doc_content["attributes"] += self.__get_field_value(doc[f], f)
# Adds content for each individual field
if f in self.__top_fields:
self._doc_content[f] += self.__get_field_value(doc[f])
# keeps only unique phrases for each field
# Adds everything to the catchall field
for field in self._fsdm_fields:
self._doc_content[field] = list(set(self._doc_content[field]))
self._doc_content[Elastic.FIELD_CATCHALL] += self._doc_content[field]
return self._doc_content
def build(self):
mappings = self.get_mappings()
indexer = IndexerMongo(self._index_name, mappings, MONGO_COLLECTION_DBPEDIA, model=self._model)
indexer.build(self.get_doc_content)
def compute_field_counts():
"""Reads all documents in the Mongo collection and calculates field frequencies.
i.e. For DBpedia collection, it returns all entity fields.
:return a dictionary of fields and their frequency
"""
PLOGGER.info("Counting fields ...")
dbpedia_coll = Mongo(MONGO_HOST, MONGO_DB, MONGO_COLLECTION_DBPEDIA).find_all()
i = 0
field_counts = dict()
for entity in dbpedia_coll:
for field in entity:
if field == Mongo.ID_FIELD:
continue
if field in field_counts:
field_counts[field] += 1
else:
field_counts[field] = 1
i += 1
if i % 1000000 == 0:
PLOGGER.info("\t" + str(int(i / 1000000)) + "M entity is processed!")
return field_counts
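# Shape of the returned dictionary, sketched with one real DBpedia field name
# ("<rdfs:label>") and an illustrative count:
#   {"<rdfs:label>": 4500000, ...}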
def main(args):
config = FileUtils.load_config(args.config)
if "_uri" not in config["index_name"]:
PLOGGER.error("index name might not be correct, please check again!")
exit(0)
if "fields_file" not in config:
fields_count = compute_field_counts()
else:
fields_count = json.load(config["fields_file"])
indexer = IndexerDBpediaURI(config, fields_count)
indexer.build()
PLOGGER.info("Index build: " + config["index_name"])
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="config file", type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
main(arg_parser()) |
medtray/MultiEm-RGCN | nordlys/nordlys/logic/el/__init__.py | <gh_stars>10-100
"""
Entity linking
==============
This package is the implementation of entity linking.
"""
|
medtray/MultiEm-RGCN | train_unsupervised.py | """
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/MichSchli/RelationPrediction
Differences compared to MichSchli/RelationPrediction:
* Reports raw metrics instead of filtered metrics.
* Uniform edge sampling can be used instead of the neighbor-based edge sampling
of the author's code; in practice both achieve similar MRR, probably because the
model only uses one GNN layer, so messages are propagated among immediate
neighbors. Note that this script's argument parser defaults to
"--edge-sampler=neighbor"; pass "--edge-sampler=uniform" to switch.
"""
import argparse
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from dgl.contrib.data import load_data
from dgl.nn.pytorch import RelGraphConv
import subprocess
from model import BaseRGCN
import os
import utils
class EmbeddingLayer(nn.Module):
def __init__(self, num_nodes, h_dim):
super(EmbeddingLayer, self).__init__()
self.embedding = torch.nn.Embedding(num_nodes, h_dim)
def forward(self, g, h, r, norm):
return self.embedding(h.squeeze())
class RGCN(BaseRGCN):
def build_input_layer(self):
return EmbeddingLayer(self.num_nodes, self.h_dim)
def build_hidden_layer(self, idx):
act = F.relu if idx < self.num_hidden_layers - 1 else None
return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, "bdd",
self.num_bases, activation=act, self_loop=True,
dropout=self.dropout)
class LinkPredict(nn.Module):
def __init__(self, in_dim, h_dim, num_rels, num_bases=-1,
num_hidden_layers=1, dropout=0, use_cuda=False, reg_param=0):
super(LinkPredict, self).__init__()
self.rgcn = RGCN(in_dim, h_dim, h_dim, num_rels * 2, num_bases,
num_hidden_layers, dropout, use_cuda)
self.reg_param = reg_param
self.w_relation = nn.Parameter(torch.Tensor(2*num_rels, h_dim))
nn.init.xavier_uniform_(self.w_relation,
gain=nn.init.calculate_gain('relu'))
def calc_score(self, embedding, triplets):
# DistMult
s = embedding[triplets[:, 0]]
r = self.w_relation[triplets[:, 1]]
o = embedding[triplets[:, 2]]
score = torch.sum(s * r * o, dim=1)
return score
def forward(self, g, h, r, norm):
return self.rgcn.forward(g, h, r, norm)
def regularization_loss(self, embedding):
return torch.mean(embedding.pow(2)) + torch.mean(self.w_relation.pow(2))
def get_loss(self, g, embed, triplets, labels):
# triplets is a list of data samples (positive and negative)
# each row in the triplets is a 3-tuple of (source, relation, destination)
score = self.calc_score(embed, triplets)
predict_loss = F.binary_cross_entropy_with_logits(score, labels)
reg_loss = self.regularization_loss(embed)
#unified embedding
# x = embed.unsqueeze(0)
# y = x
# x_norm = (x ** 2).sum(2).view(x.shape[0], x.shape[1], 1)
# y_t = y.permute(0, 2, 1).contiguous()
# y_norm = (y ** 2).sum(2).view(y.shape[0], 1, y.shape[1])
# dist = x_norm + y_norm - 2.0 * torch.bmm(x, y_t)
# dist[dist != dist] = 0 # replace nan values with 0
# pairwise_dist = torch.clamp(dist, 0.0, np.inf).squeeze(0)
# sum_pairwise=torch.sum(pairwise_dist)
# positive_triples=triplets[torch.nonzero(labels==1).squeeze()]
# pdist = nn.PairwiseDistance(p=2)
#
# def loss_term(rel_id,weight):
# a = torch.nonzero(positive_triples[:, 1] == rel_id)
# bb = positive_triples[a.squeeze()]
# sum_pairwise=0
# if (len(bb.shape) == 1):
# bb = bb.unsqueeze(0)
# if (len(bb.shape) > 0):
# embed1 = embed[bb[:, 0]]
# embed2 = embed[bb[:, 2]]
# output = pdist(embed1, embed2)
# sum_pairwise = weight * torch.sum(output)
#
# return sum_pairwise
#
sum_pairwise=0
# epsilon=0.00001
# for i in range(3):
# sum_pairwise+=loss_term(i, i+epsilon)
#
# for i in range(11,14):
# sum_pairwise += loss_term(i, i-11+epsilon)
#
# sum_pairwise += loss_term(9, 2+epsilon)
# sum_pairwise += loss_term(20, 2+epsilon)
        return predict_loss + self.reg_param * reg_loss + 0.0 * sum_pairwise
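# A minimal, self-contained sketch of the DistMult scoring used in
# LinkPredict.calc_score above (toy tensors, not part of the training pipeline):
#   import torch
#   emb = torch.randn(4, 8)               # 4 entities, 8-dim embeddings
#   w_rel = torch.randn(2, 8)             # 2 relation embeddings
#   triplets = torch.tensor([[0, 1, 3]])  # (source, relation, destination)
#   s, r, o = emb[triplets[:, 0]], w_rel[triplets[:, 1]], emb[triplets[:, 2]]
#   scores = torch.sum(s * r * o, dim=1)  # one DistMult score per triplet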
def node_norm_to_edge_norm(g, node_norm):
g = g.local_var()
# convert to edge norm
g.ndata['norm'] = node_norm
g.apply_edges(lambda edges: {'norm': edges.dst['norm']})
return g.edata['norm']
def _read_dictionary(filename):
d = {}
with open(filename, 'r+') as f:
for line in f:
line = line.strip().split('\t')
d[line[1]] = int(line[0])
return d
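# entities.dict / relations.dict are assumed to contain tab-separated
# "<integer id>\t<name>" lines; _read_dictionary maps name -> id, while
# _read_dictionary_test (below) maps the first column to the second.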
def _read_dictionary_test(filename):
d = {}
with open(filename, 'r+') as f:
for line in f:
line = line.strip().split('\t')
d[line[0]] = line[1]
return d
def _read_triplets(filename):
with open(filename, 'r+') as f:
for line in f:
processed_line = line.strip().split('\t')
yield processed_line
def _read_triplets_as_list(filename, entity_dict, relation_dict):
l = []
for triplet in _read_triplets(filename):
s = entity_dict[triplet[0]]
r = relation_dict[triplet[1]]
o = entity_dict[triplet[2]]
l.append([s, r, o])
return l
class RGCNLinkDataset(object):
def __init__(self, name):
self.name = name
self.dir = './'
self.dir = os.path.join(self.dir, self.name)
def load(self):
entity_path = os.path.join(self.dir, 'entities.dict')
relation_path = os.path.join(self.dir, 'relations.dict')
train_path = os.path.join(self.dir, 'train.txt')
valid_path = os.path.join(self.dir, 'valid.txt')
test_path = os.path.join(self.dir, 'test.txt')
entity_dict = _read_dictionary(entity_path)
relation_dict = _read_dictionary(relation_path)
self.train = np.array(_read_triplets_as_list(train_path, entity_dict, relation_dict))
self.valid = np.array(_read_triplets_as_list(valid_path, entity_dict, relation_dict))
self.test = np.array(_read_triplets_as_list(test_path, entity_dict, relation_dict))
self.num_nodes = len(entity_dict)
print("# entities: {}".format(self.num_nodes))
self.num_rels = len(relation_dict)
print("# relations: {}".format(self.num_rels))
print("# edges: {}".format(len(self.train)))
def sort_and_rank(score, target):
_, indices = torch.sort(score, dim=1, descending=True)
indices = torch.nonzero(indices == target.view(-1, 1))
indices = indices[:, 1].view(-1)
return indices
def get_relation_score(embedding, w, a, b, test_size, batch_size=100):
""" Perturb one element in the triplets
"""
n_batch = (test_size + batch_size - 1) // batch_size
ranks = []
for idx in range(n_batch):
print("batch {} / {}".format(idx, n_batch))
batch_start = idx * batch_size
batch_end = min(test_size, (idx + 1) * batch_size)
batch_a = a[batch_start: batch_end]
target = b[batch_start: batch_end]
relevance_relations = [6,7,8]
# relevance_relations = [1, 2, 3]
scores = []
for rel_relation in relevance_relations:
relation = rel_relation * torch.ones(target.shape[0]).type(torch.int64)
s = embedding[batch_a]
r = w[relation]
o = embedding[target]
scores.append(torch.sum(s * r * o, dim=1))
final_scores = torch.cat([score.view(-1, 1) for score in scores], dim=1)
labels = torch.argmax(final_scores, dim=1)
ranks.append(labels)
return torch.cat(ranks)
def get_relevance_relation_score(embedding, w, a, b, test_size, batch_size=100):
""" Perturb one element in the triplets
"""
n_batch = (test_size + batch_size - 1) // batch_size
ranks = []
for idx in range(n_batch):
print("batch {} / {}".format(idx, n_batch))
batch_start = idx * batch_size
batch_end = min(test_size, (idx + 1) * batch_size)
batch_a = a[batch_start: batch_end]
target = b[batch_start: batch_end]
rel_relation = 7
# relevance_relations = [1, 2, 3]
relation = rel_relation * torch.ones(target.shape[0]).type(torch.int64)
s = embedding[batch_a]
r = w[relation]
o = embedding[target]
ranks.append(torch.sum(s * r * o, dim=1))
return torch.cat(ranks)
def calculate_ndcg(output_file, ndcg_file):
# batcmd = "./trec_eval -m ndcg_cut.5 "+ndcg_file+" " + output_file
batcmd = "./trec_eval -m map " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
    map_score = float(res[2])  # avoid shadowing the builtin map
batcmd = "./trec_eval -m recip_rank " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
mrr = float(res[2])
batcmd = "./trec_eval -m ndcg_cut.5 " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
ndcg = float(res[2])
    return ndcg, map_score, mrr
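# trec_eval prints one "<measure>\t<query>\t<value>" line per requested measure
# (e.g. "map all 0.2345", tab-separated, value illustrative), which is why the
# score sits at index 2 after splitting on tabs.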
def main(args):
args.device = 'cuda:' + str(args.gpu)
# load graph data
data = RGCNLinkDataset(args.dataset)
dir_base = data.dir
data.load()
num_nodes = data.num_nodes
train_data = data.train
# valid_data = data.valid
# test_data = data.test
num_rels = data.num_rels
# check cuda
use_cuda = args.gpu >= 0 and torch.cuda.is_available()
if use_cuda:
torch.cuda.set_device(args.gpu)
# create model
model = LinkPredict(num_nodes,
args.n_hidden,
num_rels,
num_bases=args.n_bases,
num_hidden_layers=args.n_layers,
dropout=args.dropout,
use_cuda=use_cuda,
reg_param=args.regularization)
# validation and testing triplets
# valid_data = torch.LongTensor(valid_data)
# test_data = torch.LongTensor(test_data)
# build test graph
test_graph, test_rel, test_norm,train_data = utils.build_test_graph(
num_nodes, num_rels, train_data)
# test_deg = test_graph.in_degrees(
# range(test_graph.number_of_nodes())).float().view(-1, 1)
# test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)
# test_rel = torch.from_numpy(test_rel)
# test_norm = node_norm_to_edge_norm(test_graph, torch.from_numpy(test_norm).view(-1, 1))
if use_cuda:
model.cuda()
# build adj list and calculate degrees for sampling
adj_list, degrees = utils.get_adj_and_degrees(num_nodes, train_data)
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
model_state_file = os.path.join(args.dataset,'model_state_modified.pth')
def load_checkpoint_for_eval(model, filename):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename,map_location=args.device)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model
model = load_checkpoint_for_eval(model, model_state_file)
forward_time = []
backward_time = []
# training loop
print("start training...")
tables_id_file = os.path.join(args.dataset, 'tables_id.npy')
tables_id = np.load(tables_id_file)
epoch = 0
best_mrr = 0
while True:
model.train()
epoch += 1
# perform edge neighborhood sampling to generate training graph and data
g, node_id, edge_type, node_norm, data, labels = \
utils.generate_sampled_graph_and_labels(
train_data, args.graph_batch_size, args.graph_split_size,
num_rels, adj_list, degrees, args.negative_sample,tables_id,
args.edge_sampler)
print("Done edge sampling")
# set node/edge feature
node_id = torch.from_numpy(node_id).view(-1, 1).long()
edge_type = torch.from_numpy(edge_type)
edge_norm = node_norm_to_edge_norm(g, torch.from_numpy(node_norm).view(-1, 1))
data, labels = torch.from_numpy(data), torch.from_numpy(labels)
deg = g.in_degrees(range(g.number_of_nodes())).float().view(-1, 1)
if use_cuda:
node_id, deg = node_id.cuda(), deg.cuda()
edge_type, edge_norm = edge_type.cuda(), edge_norm.cuda()
data, labels = data.cuda(), labels.cuda()
t0 = time.time()
embed = model(g, node_id, edge_type, edge_norm)
loss = model.get_loss(g, embed, data, labels)
t1 = time.time()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm) # clip gradients
optimizer.step()
t2 = time.time()
forward_time.append(t1 - t0)
backward_time.append(t2 - t1)
print("Epoch {:04d} | Loss {:.4f} | Best MRR {:.4f} | Forward {:.4f}s | Backward {:.4f}s".
format(epoch, loss.item(), best_mrr, forward_time[-1], backward_time[-1]))
optimizer.zero_grad()
if epoch % args.evaluate_every == 0:
torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
model_state_file)
# # validation
# if epoch % args.evaluate_every == 0:
# # perform validation on CPU because full graph is too large
# if use_cuda:
# model.cpu()
# model.eval()
# print("start eval")
# embed = model(test_graph, test_node_id, test_rel, test_norm)
# mrr = utils.calc_mrr(embed, model.w_relation, valid_data,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
# # save best model
# if mrr < best_mrr:
# if epoch >= args.n_epochs:
# break
# else:
# best_mrr = mrr
# torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
# model_state_file)
# if use_cuda:
# model.cuda()
if epoch >= args.n_epochs:
break
torch.save({'state_dict': model.state_dict(), 'epoch': epoch},
model_state_file)
print("training done")
print("Mean forward time: {:4f}s".format(np.mean(forward_time)))
print("Mean Backward time: {:4f}s".format(np.mean(backward_time)))
# print("\nstart testing:")
# # use best model checkpoint
# checkpoint = torch.load(model_state_file)
# if use_cuda:
# model.cpu() # test on CPU
# model.eval()
# model.load_state_dict(checkpoint['state_dict'])
# print("Using best epoch: {}".format(checkpoint['epoch']))
# embed = model(test_graph, test_node_id, test_rel, test_norm)
#
# utils.calc_mrr(embed, model.w_relation, test_data,
# hits=[1, 3, 10], eval_bz=args.eval_batch_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RGCN')
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout probability")
parser.add_argument("--n-hidden", type=int, default=100,
help="number of hidden units")
parser.add_argument("--gpu", type=int, default=0,
help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--n-bases", type=int, default=10,
help="number of weight blocks for each relation")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("--n-epochs", type=int, default=1000,
help="number of minimum training epochs")
parser.add_argument("--dataset", type=str, default='wikiTables',
help="dataset to use")
parser.add_argument("--eval-batch-size", type=int, default=2000,
help="batch size when evaluating")
parser.add_argument("--regularization", type=float, default=0.01,
help="regularization weight")
parser.add_argument("--grad-norm", type=float, default=1.0,
help="norm to clip gradient to")
parser.add_argument("--graph-batch-size", type=int, default=5000,
help="number of edges to sample in each iteration")
parser.add_argument("--graph-split-size", type=float, default=0.5,
help="portion of edges used as positive sample")
parser.add_argument("--negative-sample", type=int, default=10,
help="number of negative samples per positive sample")
parser.add_argument("--evaluate-every", type=int, default=100,
help="perform evaluation every n epochs")
parser.add_argument("--edge-sampler", type=str, default="neighbor",
help="type of edge sampler: 'uniform' or 'neighbor'")
args = parser.parse_args()
print(args)
main(args)
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/er/__init__.py | <reponame>medtray/MultiEm-RGCN
"""
Entity retrieval
================
This is the entity retrieval package.
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/tti/type_centric.py | <gh_stars>10-100
"""
Type centric method for TTI.
@author:
"""
from nordlys.config import ELASTIC_TTI_INDICES
from nordlys.core.retrieval.elastic_cache import ElasticCache
TC_INDEX = ELASTIC_TTI_INDICES[0]
class TypeCentric(object):
def __init__(self, query, retrieval_config):
self.__query = query
self.__retrieval_config = retrieval_config
self.__elasttic = ElasticCache(TC_INDEX)
def __type_centric(self, query):
"""Type-centric TTI."""
types = dict()
model = self.__config.get("model", TTI_MODEL_BM25)
elastic = ElasticCache(self.__tc_config.get("index", DEFAULT_TTI_TC_INDEX))
if model == TTI_MODEL_BM25:
print("TTI, TC, BM25")
scorer = Scorer.get_scorer(elastic, query, self.__tc_config)
types = Retrieval(self.__tc_config).retrieve(query, scorer)
elif model == TTI_MODEL_LM:
print("TTI, TC, LM")
self.__tc_config["model"] = "lm" # Needed for 2nd-pass
self.__tc_config["field"] = "content" # Needed for 2nd-pass
self.__tc_config["second_pass"] = {
"field": "content"
}
for param in ["smoothing_method", "smoothing_param"]:
if self.__config.get(param, None) is not None:
self.__tc_config["second_pass"][param] = self.__config.get(param)
scorer = Scorer.get_scorer(elastic, query, self.__tc_config)
types = Retrieval(self.__tc_config).retrieve(query, scorer)
return types |
medtray/MultiEm-RGCN | nordlys/nordlys/core/utils/logging_utils.py | <gh_stars>10-100
"""
Logging Utils
=============
Utility methods for logging.
:Author: <NAME>
"""
import logging, time
class RequestHandler(object):
"""Handler for elastic request"""
def __init__(self, logging_path):
self.fh = self._init_handler(logging_path)
def _init_handler(self, logging_path):
"""Create log file base on logging time setting"""
date = time.strftime("%Y-%m-%d") # get current date
log_file = "{0}/api/{1}.log".format(logging_path, date)
fh = logging.FileHandler(log_file, "a")
fh.setLevel(logging.INFO)
return fh
class PrintHandler(object):
"""Handler for elastic prints"""
def __init__(self, logging_level):
self.ch = self._init_handler(logging_level)
def _init_handler(self, logging_level):
"""Create log stream"""
ch = logging.StreamHandler()
ch.setLevel(logging_level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
return ch |
medtray/MultiEm-RGCN | nordlys/nordlys/services/api.py | <reponame>medtray/MultiEm-RGCN
"""
Nordlys API
===========
This is the main console application for the Nordlys API.
:Authors: <NAME>, <NAME>, <NAME>
"""
from flask import Flask, jsonify, request
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.logic.entity.entity import Entity
from nordlys.logic.features.feature_cache import FeatureCache
from nordlys.services.el import EL
from nordlys.services.er import ER
from nordlys.services.tti import TTI
from nordlys.core.utils.logging_utils import RequestHandler
import logging, traceback
from time import strftime
from nordlys.config import LOGGING_PATH, PLOGGER, ELASTIC_INDICES
# Variables
DBPEDIA_INDEX = ELASTIC_INDICES[0]
__entity = Entity()
__elastic = ElasticCache(DBPEDIA_INDEX)
__fcache = FeatureCache()
app = Flask(__name__)
def error(msg):
    """
    @todo complete error handling
    :param msg: error message
    :return: JSON error response
    """
    res = {"ERROR": msg}
return jsonify(**res)
@app.route("/")
def index():
return "This is the Nordlys API"
@app.route("/ec/lookup_id/<path:entity_id>")
def catalog_lookup_id(entity_id):
entity = __entity.lookup_en(entity_id)
if entity is None:
return error("Entity ID '{}' does not exist".format(entity_id))
return jsonify(**entity)
@app.route("/ec/lookup_sf/dbpedia/<sf>")
def catalog_lookup_sf_dbpedia(sf):
ce = __entity.lookup_name_dbpedia(sf)
if ce is None or len(ce) == 0:
return error("Surface form '{}' does not exist".format(sf))
return jsonify(**ce)
@app.route("/ec/lookup_sf/facc/<sf>")
def catalog_lookup_sf_facc(sf):
ce = __entity.lookup_name_facc(sf)
if ce is None or len(ce) == 0:
return error("Surface form '{}' does not exist".format(sf))
return jsonify(**ce)
@app.route("/ec/freebase2dbpedia/<path:fb_id>")
def catalog_fb2dbp(fb_id):
dbp_ids = __entity.fb_to_dbp(fb_id)
if dbp_ids is None:
return error("Freebase ID '{}' does not exist".format(fb_id))
res = {"dbpedia_ids": dbp_ids}
return jsonify(**res)
@app.route("/ec/dbpedia2freebase/<path:dbp_id>")
def catalog_dbp2fb(dbp_id):
fb_ids = __entity.dbp_to_fb(dbp_id)
if fb_ids is None:
return error("DBpedia ID '{}' does not exist".format(dbp_id))
res = {"freebase_ids": fb_ids}
return jsonify(**res)
# /er?q=xx[&start=xx&field=xx&model=xx&smoothing_method=xx&smoothing_param=xx]
@app.route("/er")
def retrieval():
query = request.args.get("q", None)
if query is None:
return error("Query is not specified.")
config = {"first_pass": {}}
for param in ["fields_return", "1st_num_docs"]:
if request.args.get(param, None) is not None:
config["first_pass"][param] = request.args.get(param)
for param in ["index_name", "start", "num_docs", "model", "fields", "smoothing_method", "smoothing_param"]:
if request.args.get(param, None) is not None:
config[param] = request.args.get(param)
er = ER(config, __elastic)
res = er.retrieve(query)
return jsonify(**res)
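# Example request, a sketch (host/port are Flask defaults, not configured here;
# the query string is hypothetical):
#   GET http://localhost:5000/er?q=audi&model=lm&num_docs=10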
@app.route("/el")
def entity_linking():
query = request.args.get("q", None)
if query is None:
return error("Query is not specified.")
config = {
"method": request.args.get("method", None),
"threshold": request.args.get("threshold", 0.1)
}
el = EL(config, __entity, __elastic, __fcache)
res = el.link(query)
PLOGGER.debug(res)
return jsonify(**res)
@app.route("/tti")
def entity_types():
query = request.args.get("q", None)
if query is None:
return error("Query is not specified.")
config = dict()
params = ["method", "num_docs", "start", "model", "ec_cutoff", "field", "smoothing_method", "smoothing_param"]
for param in params:
if request.args.get(param, None) is not None:
config[param] = request.args.get(param)
tti = TTI(config)
res = tti.identify(query)
return jsonify(**res)
@app.after_request
def after_request(response):
timestamp = strftime("[%Y-%m-%d %H:%M:%S]")
logger.info('%s %s %s %s %s %s',
timestamp, request.remote_addr, request.method,
request.scheme, request.full_path, response.status)
return response
@app.errorhandler(Exception)
def exceptions(e):
tb = traceback.format_exc()
timestamp = strftime("[%Y-%m-%d %H:%M:%S]")
logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\n%s',
timestamp, request.remote_addr, request.method,
request.scheme, request.full_path, tb)
    return "Internal Server Error", 500  # generic exceptions carry no status_code attribute
if __name__ == "__main__":
handler = RequestHandler(LOGGING_PATH)
logger = logging.getLogger('nordlys.requests')
logger.addHandler(handler.fh)
logger.setLevel(logging.DEBUG)
app.run(host="0.0.0.0")
|
medtray/MultiEm-RGCN | elasticsearch_index_data.py | from sklearn.metrics.pairwise import cosine_similarity
import matplotlib
import os
import json
from random import shuffle
from collections import Counter
from os import path
import sys
sys.path.append(path.abspath('nordlys'))
from nordlys.core.retrieval.elastic import Elastic
import pandas as pd
from utils import *
from tqdm import tqdm as tqdm
index_name = "new_data_index"
mappings = {
"attributes": Elastic.analyzed_field(),
"description": Elastic.analyzed_field(),
"data": Elastic.analyzed_field(),
"desc_att": Elastic.analyzed_field(),
"desc_att_data": Elastic.analyzed_field()
}
docs={}
data_path = 'wikiTables/data_fields_with_values.json'  # renamed to avoid shadowing os.path imported above
with open(data_path) as f:
dt = json.load(f)
with tqdm(total=len(dt)) as pbar:
for ii,table_name in enumerate(dt):
test_table = dt[table_name]
#print(table_name)
attributes = test_table['attributes']
description = test_table['pgTitle']+' '+test_table['secondTitle']+' '+test_table['secondTitle']
data = test_table['data']
#description = preprocess(description, 'description')
#description = ' '.join(description)
#attributes = preprocess(attributes, 'attribute')
#attributes = ' '.join(attributes)
data = ' '.join(y for x in data for y in x)
if table_name not in docs:
docs[table_name] = {}
docs[table_name]['attributes'] = attributes
docs[table_name]['description'] = description
docs[table_name]['data'] = data
docs[table_name]['desc_att'] = description+' '+attributes
docs[table_name]['desc_att_data'] = description+' '+attributes+' '+data
pbar.update(1)
print('total number of tables is ', len(dt))
elastic = Elastic(index_name)
elastic.create_index(mappings, model='BM25',force=True)
elastic.add_docs_bulk(docs)
print("index has been built")
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/ftr_entity_mention.py | <reponame>medtray/MultiEm-RGCN
"""
FTR Entity Mention
==================
Implements features related to an entity-mention pair.
:Author: <NAME>
"""
from nordlys.logic.query.query import Query
TITLE = "<rdfs:label>"
SHORT_ABS = "<rdfs:comment>"
class FtrEntityMention(object):
def __init__(self, en_id, mention, entity):
self.__en_id = en_id
self.__mention = mention
self.__entity = entity
self.__en_doc = None
def __load_en(self):
if self.__en_doc is None:
en_doc = self.__entity.lookup_en(self.__en_id)
self.__en_doc = en_doc if en_doc is not None else {}
def commonness(self):
"""Computes probability of entity e being linked by mention: link (e,m)/link(m)
Returns zero if link(m) = 0
"""
fb_ids = self.__entity.dbp_to_fb(self.__en_id)
if fb_ids is None:
return 0
matches = self.__entity.lookup_name_facc(self.__mention).get("facc12", {})
total_occurrences = sum(list(matches.values()))
commonness = matches.get(fb_ids[0], 0) / total_occurrences if total_occurrences != 0 else 0
return commonness
def mct(self):
"""Returns True if mention contains the title of entity """
self.__load_en()
mct = 0
en_title = Query(self.__en_doc.get(TITLE, [""])[0]).query
if en_title in self.__mention:
mct = 1
return mct
def tcm(self):
"""Returns True if title of entity contains mention """
self.__load_en()
tcm = 0
en_title = Query(self.__en_doc.get(TITLE, [""])[0]).query
if self.__mention in en_title:
tcm = 1
return tcm
def tem(self):
"""Returns True if title of entity equals mention."""
self.__load_en()
tem = 0
en_title = Query(self.__en_doc.get(TITLE, [""])[0]).query
if self.__mention == en_title:
tem = 1
return tem
def pos1(self):
"""Returns position of the occurrence of mention in the short abstract."""
self.__load_en()
pos1 = 1000
s_abs = self.__en_doc.get(SHORT_ABS, [""])[0].lower()
if self.__mention in s_abs:
pos1 = s_abs.find(self.__mention)
return pos1
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/er/top_fields.py | <filename>nordlys/nordlys/logic/er/top_fields.py
"""
Top Fields
==========
This class returns top fields based on document frequency
:Author: <NAME>
"""
from nordlys.core.retrieval.elastic import Elastic
class TopFields(object):
DEBUG = 0
def __init__(self, elastic):
self.elastic = elastic
self.__fields = None
self.__fsdm_fields = {"names", "categories", "attributes", "similar_entity_names", "related_entity_names"}
@property
def fields(self):
if self.__fields is None:
self.__fields = set(self.elastic.get_fields())
return self.__fields
def get_top_term(self, en, n):
"""Returns top-n fields with highest document frequency for the given entity ID."""
doc_freq = {}
if self.DEBUG:
print("Entity:[" + en + "]")
for field in self.fields:
df = self.elastic.doc_freq(en, field)
if df > 0:
doc_freq[field] = df
top_fields = self.__get_top_n(doc_freq, n)
return top_fields
def __get_top_n(self, fields_freq, n):
"""Sorts fields and returns top-n."""
sorted_fields = sorted(fields_freq.items(), key=lambda item: (item[1], item[0]), reverse=True)
top_fields = dict()
i = 0
for field, freq in sorted_fields:
if i >= n:
break
if field in self.__fsdm_fields:
continue
i += 1
top_fields[field] = freq
if self.DEBUG:
print("(" + field + ", " + str(freq) + ")")
if self.DEBUG:
print("\nNumber of fields:", len(top_fields), "\n")
return top_fields
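# Usage sketch (the index name and entity ID below are assumptions; any Elastic
# instance exposing get_fields() and doc_freq() works):
#   top = TopFields(Elastic("dbpedia_uri")).get_top_term("<dbpedia:Audi_A4>", 10)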
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/el/cmns.py | <gh_stars>10-100
"""
Commonness Entity Linking Approach
==================================
Class for commonness entity linking approach
:Author: <NAME>
"""
from collections import defaultdict
import sys
from nordlys.logic.el.el_utils import is_name_entity
from nordlys.logic.entity.entity import Entity
from nordlys.logic.query.mention import Mention
from nordlys.logic.query.query import Query
class Cmns(object):
def __init__(self, query, entity, threshold=None, cmns_th=0.1):
self.__query = query
self.__entity = entity
self.__threshold = threshold
self.__cmns_th = cmns_th
self.__ngrams = None
self.__ranked_ens = {}
self.__mentions = set()
def link(self):
"""Links the query to the entity.
dictionary {mention: (en_id, score), ..}
"""
self.rank_ens()
linked_ens = self.disambiguate()
return linked_ens
def rank_ens(self):
"""Detects mention and rank entities for each mention"""
self.__get_ngrams()
self.__recursive_rank_ens(len(self.__query.query.split()))
def __get_ngrams(self):
"""Returns n-grams grouped by length.
:return: dictionary {1:["xx", ...], 2: ["xx yy", ...], ...}
"""
if self.__ngrams is None:
self.__ngrams = defaultdict(list)
for ngram in self.__query.get_ngrams():
self.__ngrams[len(ngram.split())].append(ngram)
def __recursive_rank_ens(self, n):
"""Generates list of entities for each mention in the query.
The algorithm starts from the longest possible n-gram and gets all matched entities.
If no entities found, the algorithm recurse and tries to find entities with (n-1)-gram.
:param n: length of n-gram
:return: dictionary {(dbp_uri, fb_id):commonness, ..}
"""
if n == 0:
return
for ngram in self.__ngrams[n]:
if not self.__is_overlapping(ngram):
all_cand_ens = Mention(ngram, self.__entity, self.__cmns_th).get_cand_ens()
# Keeps only proper named entities (if applicable)
cand_ens = {}
for en_id, commonness in all_cand_ens.items():
if not is_name_entity(en_id):
continue
cand_ens[en_id] = commonness
if len(cand_ens) > 0:
self.__ranked_ens[ngram] = cand_ens
self.__mentions.add(ngram)
self.__recursive_rank_ens(n - 1)
def disambiguate(self):
"""Selects only one entity per mention.
:return [{"mention": xx, "entity": yy, "score": zz}, ...] #dictionary {mention: (en_id, score), ..}
"""
linked_ens = [] # {}
for men, ens in self.__ranked_ens.items():
sorted_ens = sorted(ens.items(), key=lambda x: x[1], reverse=True)
score = sorted_ens[0][1]
            if self.__threshold is None or score >= self.__threshold:  # no threshold means keep all
linked_ens.append({"mention": men, "entity": sorted_ens[0][0], "score": sorted_ens[0][1]})
# linked_ens[men] = sorted_ens[0]
return linked_ens
def __is_overlapping(self, ngram):
"""Checks whether the ngram is contained in one of the currently identified mentions."""
for mention in self.__mentions:
if ngram in mention:
return True
return False
def main(args):
entity = Entity()
query = Query(args[0])
cmns = Cmns(query, entity, cmns_th=0.1)
print(cmns.link())
if __name__ == "__main__":
main(sys.argv[1:])
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/entity/entity.py | """
Entity
======
Provides access to entity catalogs (DBpedia and surface forms).
:Author: <NAME>
"""
import sys
from nordlys.config import MONGO_HOST, MONGO_DB, MONGO_COLLECTION_DBPEDIA, MONGO_COLLECTION_SF_FACC, \
MONGO_COLLECTION_FREEBASE2DBPEDIA, MONGO_COLLECTION_SF_DBPEDIA
from nordlys.core.storage.mongo import Mongo
import json
class Entity(object):
def __init__(self):
self.__coll_dbpedia = None
self.__coll_sf_facc = None
self.__coll_sf_dbpedia = None
self.__coll_fb2dbp = None
def __init_coll_dbpedia(self):
"""Makes connection to the entity (DBpedia) collection."""
if self.__coll_dbpedia is None:
self.__coll_dbpedia = Mongo(MONGO_HOST, MONGO_DB, MONGO_COLLECTION_DBPEDIA)
def __init_coll_sf_facc(self):
"""Makes connection to the surface form collection."""
if self.__coll_sf_facc is None:
self.__coll_sf_facc = Mongo(MONGO_HOST, MONGO_DB, MONGO_COLLECTION_SF_FACC)
def __init_coll_sf_dbpedia(self):
"""Makes connection to the surface form collection."""
if self.__coll_sf_dbpedia is None:
self.__coll_sf_dbpedia = Mongo(MONGO_HOST, MONGO_DB, MONGO_COLLECTION_SF_DBPEDIA)
def __init_coll_fb2dbp(self):
"""Makes connection to Freebase2DBpedia collection."""
if self.__coll_fb2dbp is None:
self.__coll_fb2dbp = Mongo(MONGO_HOST, MONGO_DB, MONGO_COLLECTION_FREEBASE2DBPEDIA)
def lookup_en(self, entity_id):
"""Looks up an entity by its identifier.
:param entity_id: entity identifier ("<dbpedia:Audi_A4>")
:return A dictionary with the entity document or None.
"""
self.__init_coll_dbpedia()
return self.__coll_dbpedia.find_by_id(entity_id)
def lookup_name_facc(self, name):
"""Looks up a name in a surface form dictionary and returns all candidate entities."""
self.__init_coll_sf_facc()
res = self.__coll_sf_facc.find_by_id(name.lower())
return res if res else {}
def lookup_name_dbpedia(self, name):
"""Looks up a name in a surface form dictionary and returns all candidate entities."""
self.__init_coll_sf_dbpedia()
res = self.__coll_sf_dbpedia.find_by_id(name.lower())
return res if res else {}
def fb_to_dbp(self, fb_id):
"""Converts Freebase id to DBpedia; it returns list of DBpedia IDs."""
self.__init_coll_fb2dbp()
res = self.__coll_fb2dbp.find_by_id(fb_id)
return res["!<owl:sameAs>"] if res else None
def dbp_to_fb(self, dbp_id):
"""Converts DBpedia id to Freebase; it returns list of Freebase IDs."""
en = self.lookup_en(dbp_id)
if en is None:
return None
return en.get("fb:<owl:sameAs>", None)
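# Minimal usage sketch (requires a running MongoDB with the collections
# configured above; the entity ID follows the lookup_en docstring):
#   entity = Entity()
#   doc = entity.lookup_en("<dbpedia:Audi_A4>")
#   fb_ids = entity.dbp_to_fb("<dbpedia:Audi_A4>")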
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/eval/plot_diff.py | """
Plot Differences
================
Plots a series of scores which represent differences.
:Authors: <NAME>, <NAME>
"""
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pylab as plt
from nordlys.core.utils.file_utils import FileUtils
class QueryDiff(object):
SCORES = [25, 20, 10, 5, 0, -1, -5, -10]
def __init__(self):
        self.width = 1.0
self.color = "blue"
def make_plot(self):
"""Make a bar plot using SCORES"""
N = len(self.SCORES)
x = range(N)
plt.bar(x, self.SCORES, self.width, color=self.color)
plt.show()
def create_pdf(self, diff_file, pdf_file, title="", xlabel="", ylabel="", aspect_ratio="equal", separator="\t"):
"""Create bar plot for differences in pdf.
This function is used to load difference .csv file,
create bar plot and store as a pfd file.
:pdf: created and saved pdf file
"""
data = FileUtils.read_file_as_list(diff_file)
scores = []
for item in data:
if "diff" in item: # ignore the first line(title)
continue
scores.append(float(item.split(separator)[3]))
scores = sorted(scores, reverse=True)
with PdfPages(pdf_file) as pdf:
n = len(scores)
x = range(n)
plt.figure(figsize=(4, 4))
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.bar(x, scores, self.width, color=self.color)
            plt.tight_layout()
pdf.savefig()
plt.close()
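# The diff file is assumed to be tab-separated, with a header line containing
# "diff" and the per-query score difference in the fourth column, e.g.
#   query_id  score_a  score_b  diff   (illustrative header; parsed column is index 3)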
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/ml/__init__.py | """
Machine learning
================
The machine learning package is connected to `Scikit-learn <http://scikit-learn.org/stable/>`_ and can be used for learning-to-rank and classification purposes.
Usage
-----
For information on how to use the package from command line and set the configuration, read :doc:`ML usage <api/nordlys.core.ml.ml>`
Data format
-----------
The file format of the training and test files is `json`. Each instance is presented as a dictionary, consisting of the following elements:
- **ID**: Instance id
- **Target**: The target value of the instance.
- **Features**: All the features, presented in key-value format. Note that all the instances should have the same set of features.
- **Properties**: All meta-data about the instance; e.g. query ID, or content.
Below is an excerpt from a json data file:
.. code:: python
{
"0": {
"properties": {
"query": "papaqui soccer",
"entity": "<dbpedia:Soccer_(1985_video_game)>"
},
"target": "0",
"features": {
"feat1": 1,
"feat2": 0,
"feat3": 25
}
},
"1": {}
}
.. note :: The sample files for using the ML package are provided under ``data/ml_sample/`` folder.
.. note :: Currently we provide support for Random Forest (RF) and Gradient Boosted Regression Trees (GBRT).
""" |
medtray/MultiEm-RGCN | nordlys/nordlys/core/ml/ml.py | <filename>nordlys/nordlys/core/ml/ml.py
"""
Machine learning
================
The command-line application for general-purpose machine learning.
Usage
-----
::
python -m nordlys.core.ml.ml <config_file>
Config parameters
------------------
- **training_set**: nordlys ML instance file format (MIFF)
- **test_set**: nordlys ML instance file format (MIFF); if provided then it's always used for testing. Can be left empty if cross-validation is used, in which case the remaining split is used for testing.
- **cross_validation**:
- k: number of folds (default: 10); use -1 for leave-one-out
- split_strategy: name of a property (normally query-id for IR problems). If set, the entities with the same value for that property are kept in the same split. if not set, entities are randomly distributed among splits.
- splits_file: JSON file with splits (instance_ids); if the file is provided it is used, otherwise it's generated
- create_splits: if True, creates the CV splits. Otherwise loads the splits from "split_file" parameter.
- **model**: ML model, currently supported values: rf, gbrt
- **category**: [regression | classification], default: "regression"
- **parameters**: dict with parameters of the given ML model
- If GBRT:
- alpha: learning rate, default: 0.1
- tree: number of trees, default: 1000
- depth: max depth of trees, default: 10% of number of features
- If RF:
- tree: number of trees, default: 1000
- maxfeat: max features of trees, default: 10% of number of features
- **model_file**: the model is saved to this file
- **load_model**: if True, loads the model
- **feature_imp_file**: Feature importance is saved to this file
- **output_file**: where output is written; default output format: TSV with with instance_id and (estimated) target
Example config
---------------
.. code:: python
{
"model": "gbrt",
"category": "regression",
"parameters":{
"alpha": 0.1,
"tree": 10,
"depth": 5
},
"training_set": "path/to/train.json",
"test_set": "path/to/test.json",
"model_file": "path/to/model.txt",
"output_file": "path/to/output.json",
"cross_validation":{
"create_splits": true,
"splits_file": "path/to/splits.json",
"k": 5,
"split_strategy": "q_id"
}
}
------------------------
:Authors: <NAME>, <NAME>
"""
import argparse
from sys import exit
import numpy
import pickle
from sklearn.ensemble import (GradientBoostingRegressor, GradientBoostingClassifier, RandomForestRegressor,
RandomForestClassifier)
from nordlys.core.ml.instances import Instances
from nordlys.core.ml.cross_validation import CrossValidation
from nordlys.config import PLOGGER
from nordlys.core.utils.file_utils import FileUtils
class ML(object):
def __init__(self, config):
self.__check_config(config)
self.__config = config
@staticmethod
def __check_config(config):
"""Checks config parameters and set default values."""
try:
# if "training_set" not in config:
# raise Exception("training_set is missing")
# if "output_file" not in config:
# raise Exception("output_file is missing")
if "cross_validation" in config:
if "splits_file" not in config["cross_validation"]:
raise Exception("splits_file is missing")
if "k" not in config["cross_validation"]:
config["cross_validation"]["k"] = 10
# else:
# if "test_set" not in config:
# raise Exception("test_set is missing")
except Exception as e:
PLOGGER.error("Error in config file: ", e)
exit(1)
def gen_model(self, num_features=None):
""" Reads parameters and generates a model to be trained.
:param num_features: int, number of features
:return untrained ranker/classifier
"""
model = None
if self.__config["model"].lower() == "gbrt":
alpha = self.__config["parameters"].get("alpha", 0.1)
tree = self.__config["parameters"].get("tree", 1000)
default_depth = round(num_features / 10.0) if num_features is not None else None
depth = self.__config["parameters"].get("depth", default_depth)
PLOGGER.info("Training instances using GBRT ...")
PLOGGER.info("Number of trees: " + str(tree) + "\tDepth of trees: " + str(depth))
if self.__config.get("category", "regression") == "regression":
PLOGGER.info("Training regressor")
model = GradientBoostingRegressor(n_estimators=tree, max_depth=depth, learning_rate=alpha)
else:
PLOGGER.info("Training the classifier")
model = GradientBoostingClassifier(n_estimators=tree, max_depth=depth, learning_rate=alpha)
elif self.__config["model"].lower() == "rf":
tree = self.__config["parameters"].get("tree", 1000)
default_maxfeat = round(num_features / 10.0) if num_features is not None else None
max_feat = self.__config["parameters"].get("maxfeat", default_maxfeat)
PLOGGER.info("Training instances using RF ...")
PLOGGER.info("Number of trees: " + str(tree) + "\tMax features: " + str(max_feat))
if self.__config.get("category", "regression") == "regression":
PLOGGER.info("Training regressor")
model = RandomForestRegressor(n_estimators=tree, max_features=max_feat)
else:
PLOGGER.info("Training classifier")
model = RandomForestClassifier(n_estimators=tree, max_features=max_feat)
return model
def train_model(self, instances):
"""Trains model on a given set of instances.
:param instances: Instances object
:return: the learned model
"""
features = instances.get_all()[0].features
features_names = sorted(features.keys())
PLOGGER.info("Number of instances:\t" + str(len(instances.get_all())))
PLOGGER.info("Number of features:\t" + str(len(features_names)))
# Converts instances to Scikit-learn format : (n_samples, n_features)
n_samples = len(instances.get_all())
train_x = numpy.zeros((n_samples, len(features_names)))
train_y = numpy.empty(n_samples, dtype=object) # numpy.zeros(n_samples)
for i, ins in enumerate(instances.get_all()):
train_x[i] = [ins.features[ftr] for ftr in features_names]
if self.__config.get("category", "regression") == "regression":
train_y[i] = float(ins.target)
else:
train_y[i] = str(ins.target)
# training
model = self.gen_model(len(features))
model.fit(train_x, train_y)
# write the trained model to the file
if "model_file" in self.__config:
# @todo if CV is used we need to append the fold no. to the filename
PLOGGER.info("Writing trained model to {} ...".format(self.__config["model_file"]))
pickle.dump(model, open(self.__config["model_file"], "wb"))
if "feature_imp_file" in self.__config:
print(self.analyse_features(model, features_names))
return model
def analyse_features(self, model, feature_names):
""" Ranks features based on their importance.
Scikit uses Gini score to get feature importances.
:param model: trained model
:param feature_names: list of feature names
"""
# we sort the features to make sure that are in the same order as they used while training.
# This is especially important when the function is called outside "train_model" function.
feature_names = sorted(feature_names)
# gets feature importance
importances = zip(feature_names, model.feature_importances_)
sorted_importances = sorted(importances, key=lambda imps: imps[1], reverse=True)
feat_imp_str = "=========== Feature Importance ===========\n"
for feat, importance in sorted_importances:
feat_imp_str += feat + "\t" + str(importance) + "\n"
feat_imp_str += "=========================================="
open(self.__config["feature_imp_file"], "w").write(feat_imp_str)
return feat_imp_str
def apply_model(self, instances, model):
"""Applies model on a given set of instances.
:param instances: Instances object
:param model: trained model
:return: Instances
"""
PLOGGER.info("Applying model ... ")
if len(instances.get_all()) > 0:
features_names = sorted(instances.get_all()[0].features.keys())
for ins in instances.get_all():
test_x = numpy.array([[ins.features[ftr] for ftr in features_names]])
if self.__config.get("category", "regression") == "regression":
ins.score = model.predict(test_x)[0]
else: # classification
ins.target = str(model.predict(test_x)[0])
# "predict_proba" gets class probabilities; an array of probabilities for each class e.g.[0.99, 0.1]
ins.score = model.predict_proba(test_x)[0][1]
return instances
def output(self, instances):
"""Writes results to output file.
:param instances: Instances object
"""
with open(self.__config["output_file"], "w") as f:
f.write("id\tscore\n") # output to file
PLOGGER.info("id\ttarget\tscore\n")
for ins in instances.get_all():
f.write(ins.id + "\t" + "{0:.5f}".format(ins.score) + "\n") # output to file
PLOGGER.info("Output saved in: " + self.__config["output_file"])
def run(self):
# load training instances
ins_train = Instances.from_json(self.__config["training_set"])
# Cross Validation
if "cross_validation" in self.__config:
cv = CrossValidation(self.__config["cross_validation"]["k"], ins_train, self.train_model, self.apply_model)
split_strategy = self.__config["cross_validation"].get("split_strategy", None)
split_file = self.__config["cross_validation"]["splits_file"]
# Always creates new splits if the create_flag is True
if bool(self.__config["cross_validation"].get("create_splits", False)) is True:
cv.create_folds(split_strategy)
cv.save_folds(split_file)
# New splits will be created only if the provided split_file does not exist.
else:
cv.get_folds(split_file, split_strategy)
inss = cv.run()
# classic test-train split
else:
ins_test = Instances.from_json(self.__config["test_set"])
model = self.train_model(ins_train)
inss = self.apply_model(ins_test, model)
# output results (which are stored in inss)
inss.to_json(self.__config["output_file"])
# inss.to_treceval(self.__config["output_file"])
# self.output(inss)
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="config file", type=str)
args = parser.parse_args()
return args
def main(args):
config = FileUtils.load_config(args.config)
ml = ML(config)
ml.run()
if __name__ == "__main__":
main(arg_parser())
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/es_example.py |
from elasticsearch import Elasticsearch
es = Elasticsearch()
body = {
    'author': '<NAME>',
    'blog': 'Learning Elasticsearch',
    'title': 'Using Python with Elasticsearch',
    'tags': ['python', 'elasticsearch', 'tips'],
}
es.index(index='test_index', doc_type='post', id=1, body=body)
tokens = es.indices.analyze(index='test_index', body={'text': 'author er tet +56 t h + 484'})
ss = {
    "aggs": {
        "grades_stats": {"stats": {"field": "author"}}
    }
}
aa = es.search(index='test_index', body=ss)
print('done')
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/el/el_utils.py | <reponame>medtray/MultiEm-RGCN
"""
EL Utils
========
Utility methods for entity linking.
Author: <NAME>
"""
from nordlys import config
from nordlys.config import PLOGGER
def load_kb_snapshot(kb_file):
"""Loads DBpedia Snapshot of proper name entities (used for entity linking)."""
if config.KB_SNAPSHOT is None:
PLOGGER.info("Loading KB snapshot of proper named entities ...")
kb_snapshot = set()
with open(kb_file, "r") as f:
for line in f:
kb_snapshot.add(line.strip())
config.KB_SNAPSHOT = kb_snapshot
def is_name_entity(en_id):
"""Returns true if the entity is considered as proper name entity."""
if (config.KB_SNAPSHOT is not None) and (en_id not in config.KB_SNAPSHOT):
return False
return True
def to_elq_eval(annotations, output_file):
"""Write entity annotations to ELQ evaluation format.
:param linked_ens: {qid:[{"mention":xx, "entity": yy, "score":zz}, ..], ..}
"""
uniq_annots = set()
out_str = ""
for qid, q_annots in sorted(annotations.items()):
for annot in q_annots["results"]:
if (qid, annot["entity"]) not in uniq_annots:
out_str += qid + "\t" + str(annot["score"]) + "\t" + annot["entity"] + "\n"
uniq_annots.add((qid, annot["entity"]))
open(output_file, "w").write(out_str)
PLOGGER.info("ELQ evaluation file: " + output_file)
|
medtray/MultiEm-RGCN | prepare_wikiTables_rdfs4.py | import numpy as np
import json
import os
from math import log
import scipy.sparse as sp
from utils_ import preprocess
from random import shuffle
import pandas as pd
from tqdm import tqdm
import random
from scipy.spatial.distance import cosine
from utils import loadWord2Vec,clean_str
from nltk.corpus import wordnet as wn
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from concurrent.futures import ThreadPoolExecutor, wait
import multiprocessing
import warnings
warnings.filterwarnings("ignore")
nb_threads=6
executor = ThreadPoolExecutor(nb_threads)
def prepare_table(test_table):
attributes = test_table['title']
pgTitle = test_table['pgTitle']
secondTitle = test_table['secondTitle']
caption = test_table['caption']
data = test_table['data']
pgTitle_feat = preprocess(pgTitle, 'description')
secondTitle_feat = preprocess(secondTitle, 'description')
caption_feat = preprocess(caption, 'description')
description = pgTitle_feat + secondTitle_feat + caption_feat
data_csv = pd.DataFrame(data, columns=attributes)
attributes = list(data_csv)
inter_att = ' '.join(attributes)
att_tokens_inter = preprocess(inter_att, 'attribute')
if len(att_tokens_inter) == 0:
data_csv = data_csv.transpose()
# vec_att = np.array(attributes).reshape(-1, 1)
data_csv_array = np.array(data_csv)
# data_csv_array = np.concatenate([vec_att, data_csv_array], axis=1)
if data_csv_array.size > 0:
attributes = data_csv_array[0, :]
data_csv = pd.DataFrame(data_csv_array, columns=attributes)
data_csv = data_csv.drop([0], axis=0).reset_index(drop=True)
else:
data_csv = data_csv.transpose()
all_att_tokens = []
for att in attributes:
att_tokens = preprocess(att, 'attribute')
all_att_tokens += att_tokens
original_attributes = all_att_tokens
values = data_csv.values
data = ' '.join(y for x in values for y in x)
data_tokens = preprocess(data, 'description')
return description,original_attributes,data_tokens
class PrepareGraph:
def __init__(self, queries, dt):
self.queries = queries
self.dt = dt
self.entities = {}
self.counter = -1
self.triples = ''
self.test_triples = ''
self.delimiter = '\t'
# self.relations=['has_value','has_attribute','has_description','has_term','not_relevant_to','somehow_relevant_to','relevant_to']
#self.relations = ['has_tfidf0', 'has_tfidf1', 'has_tfidf2', 'has_pmi0', 'has_pmi1', 'has_pmi2']
#self.relations = ['has_tfidf0', 'has_tfidf1', 'has_tfidf2', 'has_pmi0', 'has_pmi1', 'has_pmi2',
# 'has_cosine0','has_cosine1','has_cosine2','has_wordnet0','has_wordnet1',
# 'has_wordnet2']
self.relations = ['has_tfidf0', 'has_tfidf1', 'has_tfidf2', 'has_pmi0', 'has_pmi1', 'has_pmi2',
'has_cosine0', 'has_cosine1', 'has_cosine2', 'has_syn', 'has_hyper']
#self.relations = ['has_tfidf0', 'has_tfidf1', 'has_tfidf2', 'has_pmi0', 'has_pmi1', 'has_pmi2', 'has_term',
# 'has_description', 'has_attribute',
# 'not_relevant_to',
# 'somehow_relevant_to', 'relevant_to']
# self.relations = ['has_term', 'not_relevant_to',
# 'somehow_relevant_to', 'relevant_to']
def add_to_entities(self, tokens):
for tok in tokens:
if tok not in self.entities:
self.counter += 1
self.entities[tok] = self.counter
def add_links_docs(self, data,tables_id_file):
shuffle_doc_words_list_tables = []
tables_collection = set()
add_to_test=0.95
tables_id = []
collection_to_ind_tables = []
for j, line in enumerate(data):
#print(j)
table = line[2]
if j>500:
break
if table in tables_collection:
continue
table_data = self.dt[table]
pgTitle_feat = table_data['pgTitle']
secondTitle_feat = table_data['secondTitle']
caption_feat = table_data['caption']
if len(pgTitle_feat) > 0:
pgTitle_feat = pgTitle_feat.split(' ')
else:
pgTitle_feat = []
if len(secondTitle_feat) > 0:
secondTitle_feat = secondTitle_feat.split(' ')
else:
secondTitle_feat = []
if len(caption_feat) > 0:
caption_feat = caption_feat.split(' ')
else:
caption_feat = []
description = pgTitle_feat + secondTitle_feat + caption_feat
original_attributes = table_data['attributes']
if len(original_attributes) > 0:
original_attributes = original_attributes.split(' ')
else:
original_attributes = []
values = table_data['data']
if len(values) > 0:
values = values.split(' ')
else:
values = []
if table not in self.entities:
self.counter += 1
self.entities[table] = self.counter
tables_id.append(self.counter)
self.add_to_entities(description)
self.add_to_entities(original_attributes)
text_table = description + original_attributes
shuffle_doc_words_list_tables.append(' '.join(text_table))
tables_collection.add(table)
collection_to_ind_tables.append(table)
shuffle_doc_words_list_tables_tfid = shuffle_doc_words_list_tables.copy()
add_wikiTables_path=os.path.join('wikiTables','wikiPreprocessed.json')
with open(add_wikiTables_path) as f:
add_wikiTables = json.load(f)
list_of_categories = list(add_wikiTables.keys())
nb_files = len(list_of_categories)
mylist = list(range(nb_files))
shuffle(mylist)
list_of_categories = np.array(list_of_categories)[mylist]
list_of_categories = []  # NOTE: emptying the list disables the wiki-table augmentation loop below
nb_files_in_training = nb_files
with tqdm(total=nb_files_in_training) as pbar0:
for jj, category in enumerate(list_of_categories):
dt = add_wikiTables[category]
list_of_tables = list(dt.keys())
nb_tables = len(list_of_tables)
mylist_tables = list(range(nb_tables))
shuffle(mylist_tables)
list_of_tables = np.array(list_of_tables)[mylist_tables]
nb_tables_in_training_file = nb_tables
for tab_id, table_name in enumerate(list_of_tables):
if tab_id >= nb_tables_in_training_file:
break
test_table = dt[table_name]
if table_name in self.dt:
continue
pgTitle_feat = test_table['pgTitle']
secondTitle_feat = test_table['secondTitle']
caption_feat = test_table['caption']
pgTitle_feat = pgTitle_feat.split(' ') if len(pgTitle_feat) > 0 else []
secondTitle_feat = secondTitle_feat.split(' ') if len(secondTitle_feat) > 0 else []
caption_feat = caption_feat.split(' ') if len(caption_feat) > 0 else []
description = pgTitle_feat + secondTitle_feat + caption_feat
original_attributes = test_table['attributes']
original_attributes = original_attributes.split(' ') if len(original_attributes) > 0 else []
text_table = description + original_attributes
shuffle_doc_words_list_tables_tfid.append(' '.join(text_table))
if random.random() > 0.99999:  # keep only a tiny random sample of the extra wiki tables as graph nodes
if table_name not in self.entities:
self.counter += 1
self.entities[table_name] = self.counter
tables_id.append(self.counter)
self.add_to_entities(description)
self.add_to_entities(original_attributes)
text_table = description + original_attributes
shuffle_doc_words_list_tables.append(' '.join(text_table))
tables_collection.add(table_name)
collection_to_ind_tables.append(table_name)
pbar0.update(1)
np.save(tables_id_file, tables_id)
def build_vocab(shuffle_doc_words_list):
# build vocab
word_freq = {}
word_set = set()
for doc_words in shuffle_doc_words_list:
words = doc_words.split()
for word in words:
word_set.add(word)
if word in word_freq:
word_freq[word] += 1
else:
word_freq[word] = 1
vocab = list(word_set)
word_id_map = {}
for i, w in enumerate(vocab):
word_id_map[w] = i
return vocab, word_id_map
tables_vocab, word_id_map_tables = build_vocab(shuffle_doc_words_list_tables)
tables_vocab_size = len(tables_vocab)
def prepare_for_word_doc_freq(shuffle_doc_words_list):
word_doc_list = {}
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
appeared = set()
for word in words:
if word in appeared:
continue
if word in word_doc_list:
doc_list = word_doc_list[word]
doc_list.append(i)
word_doc_list[word] = doc_list
else:
word_doc_list[word] = [i]
appeared.add(word)
word_doc_freq = {}
for word, doc_list in word_doc_list.items():
word_doc_freq[word] = len(doc_list)
return word_doc_freq
word_doc_freq_tables = prepare_for_word_doc_freq(shuffle_doc_words_list_tables_tfid)
def calculate_wordnet_syn_triples(vocab, vocab_size):
"""Emits has_syn triples (word -> synset) and has_hyper triples over the full hypernym closure."""
hyper_dict = {}
with tqdm(total=vocab_size) as pbar4:
for word in vocab:
synsets = wn.synsets(clean_str(word.strip()))
for syn in synsets:
triple = word + self.delimiter + 'has_syn' + self.delimiter + syn.name() + '\n'
if syn.name() not in self.entities:
self.counter += 1
self.entities[syn.name()] = self.counter
self.triples += triple
if random.random() > add_to_test:
self.test_triples += triple
# walk the hypernym closure with an explicit stack (iterative DFS),
# deduplicating (el, hyper) pairs through hyper_dict
stack = synsets
while stack:
el = stack.pop()
hypers = el.hypernyms()
stack += hypers
for hyper in hypers:
seen = hyper_dict.setdefault(el.name(), [])
if hyper.name() in seen:
continue
triple = el.name() + self.delimiter + 'has_hyper' + self.delimiter + hyper.name() + '\n'
for name in (el.name(), hyper.name()):
if name not in self.entities:
self.counter += 1
self.entities[name] = self.counter
self.triples += triple
if random.random() > add_to_test:
self.test_triples += triple
seen.append(hyper.name())
pbar4.update(1)
def calculate_cosine_similarity(vocab, vocab_size):
# NOTE: machine-specific path to pretrained GloVe vectors; adjust as needed
word_vector_file = '/home/mohamed/PycharmProjects/glove.6B/glove.6B.50d.txt'
_, _, word_vector_map = loadWord2Vec(word_vector_file)
manager = multiprocessing.Manager()
return_dict = manager.dict()
start_indices = []
end_indices = []
step = int(len(vocab) / nb_threads)
for ii in range(nb_threads):
start_indices.append(ii * step)
if ii == nb_threads - 1:
end_indices.append(len(vocab))
else:
end_indices.append((ii + 1) * step)
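# contiguous index chunks per worker; the last worker absorbs the remainder
# (e.g. len(vocab)=103, nb_threads=4 -> chunks [0,25), [25,50), [50,75), [75,103))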
print(start_indices)
print(end_indices)
def func(start_index, end_index, thread_index, return_dict):
row = []
col = []
weight = []
with tqdm(total=end_index - start_index) as pbar4:
for i in range(start_index, end_index):
for j in range(i, vocab_size):
if vocab[i] in word_vector_map and vocab[j] in word_vector_map:
vector_i = np.array(word_vector_map[vocab[i]])
vector_j = np.array(word_vector_map[vocab[j]])
similarity = 1.0 - cosine(vector_i, vector_j)
if similarity > 0.5:
# print(vocab[i], vocab[j], similarity)
row.append(i)
col.append(j)
weight.append(similarity)
pbar4.update(1)
return_dict[thread_index] = {0: row, 1: col, 2: weight}
processes = []
for thread_index in range(nb_threads):
p = multiprocessing.Process(target=func, args=(
start_indices[thread_index], end_indices[thread_index], thread_index, return_dict,))
processes.append(p)
p.start()
for process in processes:
process.join()
final_rows = []
final_cols = []
final_weights = []
for i in range(nb_threads):
final_rows += return_dict[i][0]
final_cols += return_dict[i][1]
final_weights += return_dict[i][2]
del manager
cosine_between_words = sp.csr_matrix(
(final_weights, (final_rows, final_cols)), shape=(vocab_size, vocab_size))
return cosine_between_words
def cosine_to_triple(cosine_between_words, vocab, nbins):
entity_index, word_index = cosine_between_words.nonzero()
pmi_values = cosine_between_words.data
arr = np.copy(pmi_values)
mean = np.mean(pmi_values, axis=0)
sd = np.std(pmi_values, axis=0)
final_list = [x for x in arr if (x > mean - 2 * sd)]
final_list = [x for x in final_list if (x < mean + 2 * sd)]
start = np.min(final_list)
end = np.max(final_list)
step = (end - start) / nbins
bins = []
for i in range(nbins - 1):
bins.append(start + step * (i + 1))
inds = np.digitize(pmi_values, bins)
manager = multiprocessing.Manager()
return_dict = manager.dict()
start_indices = []
end_indices = []
step = int(len(entity_index) / nb_threads)
for ii in range(nb_threads):
start_indices.append(ii * step)
if ii == nb_threads - 1:
end_indices.append(len(entity_index))
else:
end_indices.append((ii + 1) * step)
print(start_indices)
print(end_indices)
def func(start_index, end_index, thread_index, return_dict):
triples = ''
test_triples = ''
nb_samples=0
with tqdm(total=end_index - start_index) as pbar4:
for k in range(start_index, end_index):
nb_samples+=1
entity = vocab[entity_index[k]]
wordd = vocab[word_index[k]]
if k == end_index - 1:
triple = entity + self.delimiter + 'has_cosine' + str(
inds[k]) + self.delimiter + wordd
else:
triple = entity + self.delimiter + 'has_cosine' + str(
inds[k]) + self.delimiter + wordd + '\n'
# if inds[k]==2:
# triple += entity + self.delimiter + 'has_pmi' + str(1) + self.delimiter + wordd + '\n'
triples += triple
if k == end_index - 1:
test_triples += triple
else:
if random.random() > add_to_test:
test_triples += triple
pbar4.update(1)
return_dict[thread_index] = {0: triples, 1: test_triples,2:nb_samples}
processes = []
for thread_index in range(nb_threads):
p = multiprocessing.Process(target=func, args=(
start_indices[thread_index], end_indices[thread_index], thread_index, return_dict,))
processes.append(p)
p.start()
for process in processes:
process.join()
for i in range(nb_threads):
self.triples += return_dict[i][0] + '\n'
self.test_triples += return_dict[i][1] + '\n'
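# The *_to_triple helpers share one discretization step: drop values outside
# mean +/- 2*std, split the remaining range into nbins equal-width bins, and map each
# raw weight to a bin index with np.digitize. A minimal standalone sketch of that
# shared step (the function name is illustrative and not used by the pipeline):
def discretize_weights(values, nbins):
arr = np.asarray(values, dtype=float)
mean, sd = arr.mean(), arr.std()
kept = arr[(arr > mean - 2 * sd) & (arr < mean + 2 * sd)]
start, end = kept.min(), kept.max()
step = (end - start) / nbins
bins = [start + step * (i + 1) for i in range(nbins - 1)]
return np.digitize(arr, bins)  # one bin index in [0, nbins-1] per input value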
def calculate_pmi(shuffle_doc_words_list, word_id_map, vocab, vocab_size):
# word co-occurence with context windows
window_size = 20
windows = []
for doc_words in shuffle_doc_words_list:
words = doc_words.split()
length = len(words)
if length <= window_size:
windows.append(words)
else:
# print(length, length - window_size + 1)
for j in range(length - window_size + 1):
window = words[j: j + window_size]
windows.append(window)
# print(window)
word_window_freq = {}
with tqdm(total=len(windows)) as pbar:
for window in windows:
appeared = set()
for i in range(len(window)):
if window[i] in appeared:
continue
if window[i] in word_window_freq:
word_window_freq[window[i]] += 1
else:
word_window_freq[window[i]] = 1
appeared.add(window[i])
pbar.update(1)
word_pair_count = {}
with tqdm(total=len(windows)) as pbar2:
for window in windows:
window.sort()
for i in range(1, len(window)):
for j in range(0, i):
word_i = window[i]
word_j = window[j]
if word_i not in word_id_map or word_j not in word_id_map:
continue
word_i_id = word_id_map[word_i]
word_j_id = word_id_map[word_j]
if word_i_id == word_j_id:
continue
word_pair_str = str(word_i_id) + ',' + str(word_j_id)
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
# two orders
# word_pair_str = str(word_j_id) + ',' + str(word_i_id)
# if word_pair_str in word_pair_count:
# word_pair_count[word_pair_str] += 1
# else:
# word_pair_count[word_pair_str] = 1
pbar2.update(1)
row = []
col = []
weight = []
# pmi as weights
num_window = len(windows)
with tqdm(total=len(word_pair_count)) as pbar3:
for key in word_pair_count:
temp = key.split(',')
i = int(temp[0])
j = int(temp[1])
count = word_pair_count[key]
word_freq_i = word_window_freq[vocab[i]]
word_freq_j = word_window_freq[vocab[j]]
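# PMI(i, j) = log( p(i, j) / (p(i) * p(j)) ), with probabilities estimated from
# sliding-window counts: p(i, j) = #windows containing both / #windows,
# p(i) = #windows containing i / #windows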
pmi = log((1.0 * count / num_window) /
(1.0 * word_freq_i * word_freq_j / (num_window * num_window)))
if pmi <= 0:
pbar3.update(1)
continue
row.append(i)
col.append(j)
weight.append(pmi)
pbar3.update(1)
pmi_between_words = sp.csr_matrix(
(weight, (row, col)), shape=(vocab_size, vocab_size))
return pmi_between_words
def pmi_to_triple(pmi_between_words, vocab, nbins):
entity_index, word_index = pmi_between_words.nonzero()
pmi_values = pmi_between_words.data
arr = np.copy(pmi_values)
mean = np.mean(pmi_values, axis=0)
sd = np.std(pmi_values, axis=0)
final_list = [x for x in arr if (x > mean - 2 * sd)]
final_list = [x for x in final_list if (x < mean + 2 * sd)]
start = np.min(final_list)
end = np.max(final_list)
step = (end - start) / nbins
bins = []
for i in range(nbins - 1):
bins.append(start + step * (i + 1))
inds = np.digitize(pmi_values, bins)
manager = multiprocessing.Manager()
return_dict = manager.dict()
start_indices = []
end_indices = []
step = int(len(entity_index) / nb_threads)
for ii in range(nb_threads):
start_indices.append(ii * step)
if ii == nb_threads - 1:
end_indices.append(len(entity_index))
else:
end_indices.append((ii + 1) * step)
print(start_indices)
print(end_indices)
def func(start_index, end_index, thread_index, return_dict):
triples = ''
test_triples = ''
with tqdm(total=end_index - start_index) as pbar4:
for k in range(start_index, end_index):
entity = vocab[entity_index[k]]
wordd = vocab[word_index[k]]
if k == end_index - 1:
triple = entity + self.delimiter + 'has_pmi' + str(
inds[k]) + self.delimiter + wordd
else:
triple = entity + self.delimiter + 'has_pmi' + str(
inds[k]) + self.delimiter + wordd + '\n'
# if inds[k]==2:
# triple += entity + self.delimiter + 'has_pmi' + str(1) + self.delimiter + wordd + '\n'
triples += triple
if k == end_index - 1:
test_triples += triple
else:
if random.random() > add_to_test:
test_triples += triple
pbar4.update(1)
return_dict[thread_index] = {0: triples, 1: test_triples}
processes = []
for thread_index in range(nb_threads):
p = multiprocessing.Process(target=func, args=(
start_indices[thread_index], end_indices[thread_index], thread_index, return_dict,))
processes.append(p)
p.start()
for process in processes:
process.join()
for i in range(nb_threads):
self.triples += return_dict[i][0] + '\n'
#print(len(self.triples))
self.test_triples += return_dict[i][1] + '\n'
# doc word frequency
def tf_idf_calculation(shuffle_doc_words_list, word_id_map, word_doc_freq, vocab, vocab_size):
doc_word_freq = {}
with tqdm(total=len(shuffle_doc_words_list)) as pbar5:
for doc_id in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[doc_id]
words = doc_words.split()
for word in words:
word_id = word_id_map[word]
doc_word_str = str(doc_id) + ',' + str(word_id)
if doc_word_str in doc_word_freq:
doc_word_freq[doc_word_str] += 1
else:
doc_word_freq[doc_word_str] = 1
pbar5.update(1)
row = []
col = []
weight = []
with tqdm(total=len(shuffle_doc_words_list)) as pbar6:
for i in range(len(shuffle_doc_words_list)):
doc_words = shuffle_doc_words_list[i]
words = doc_words.split()
doc_word_set = set()
for word in words:
if word in doc_word_set:
continue
j = word_id_map[word]
key = str(i) + ',' + str(j)
freq = doc_word_freq[key]
row.append(i)
col.append(j)
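# tf-idf weight = tf(word, doc) * log(N / df(word)), where N is the size of the
# larger corpus (collection tables plus added wiki tables) used for document frequencies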
idf = log(1.0 * len(shuffle_doc_words_list_tables_tfid) /
word_doc_freq[vocab[j]])
weight.append(freq * idf)
doc_word_set.add(word)
pbar6.update(1)
tf_idf = sp.csr_matrix(
(weight, (row, col)), shape=(len(shuffle_doc_words_list), vocab_size))
return tf_idf
def tf_idf_to_triple(tf_idf, collection_to_ind, vocab, nbins_tfidf):
entity_index, word_index = tf_idf.nonzero()
tfidf_values = tf_idf.data
arr = np.copy(tfidf_values)
mean = np.mean(tfidf_values, axis=0)
sd = np.std(tfidf_values, axis=0)
final_list = [x for x in arr if (x > mean - 2 * sd)]
final_list = [x for x in final_list if (x < mean + 2 * sd)]
start = np.min(final_list)
end = np.max(final_list)
step = (end - start) / nbins_tfidf
bins = []
for i in range(nbins_tfidf - 1):
bins.append(start + step * (i + 1))
inds = np.digitize(tfidf_values, bins)
manager = multiprocessing.Manager()
return_dict = manager.dict()
start_indices = []
end_indices = []
step = int(len(entity_index) / nb_threads)
for ii in range(nb_threads):
start_indices.append(ii * step)
if ii == nb_threads - 1:
end_indices.append(len(entity_index))
else:
end_indices.append((ii + 1) * step)
print(start_indices)
print(end_indices)
def func(start_index, end_index, thread_index, return_dict):
triples = ''
test_triples = ''
with tqdm(total=end_index - start_index) as pbar4:
for k in range(start_index, end_index):
entity = collection_to_ind[entity_index[k]]
wordd = vocab[word_index[k]]
if k == end_index - 1:
triple = entity + self.delimiter + 'has_tfidf' + str(
inds[k]) + self.delimiter + wordd
else:
triple = entity + self.delimiter + 'has_tfidf' + str(
inds[k]) + self.delimiter + wordd + '\n'
# if inds[k]==2:
# triple += entity + self.delimiter + 'has_pmi' + str(1) + self.delimiter + wordd + '\n'
triples += triple
if k == end_index - 1:
test_triples += triple
else:
if random.random() > add_to_test:
test_triples += triple
pbar4.update(1)
return_dict[thread_index] = {0: triples, 1: test_triples}
processes = []
for thread_index in range(nb_threads):
p = multiprocessing.Process(target=func, args=(
start_indices[thread_index], end_indices[thread_index], thread_index, return_dict,))
processes.append(p)
p.start()
for process in processes:
process.join()
for i in range(nb_threads):
if i == nb_threads - 1:
self.triples += return_dict[i][0]
# print(len(self.triples))
self.test_triples += return_dict[i][1]
else:
self.triples += return_dict[i][0] + '\n'
# print(len(self.triples))
self.test_triples += return_dict[i][1] + '\n'
all_vocab_size = len(tables_vocab)
print('>>>>>start pmi between words')
pmi_between_words = calculate_pmi(shuffle_doc_words_list_tables_tfid, word_id_map_tables, tables_vocab,
all_vocab_size)
print('>>>>>start pmi to triples')
nbins_pmi = 3
pmi_to_triple(pmi_between_words, tables_vocab, nbins_pmi)
print('>>>>>start wordnet syns triples')
# calculate_wordnet_syn_triples(tables_vocab, all_vocab_size)  # disabled: enable to emit has_syn / has_hyper triples
print('>>>>>start cosine similarity between words')
cosine_between_words = calculate_cosine_similarity(tables_vocab, all_vocab_size)
nbins_cosine = 3
print('>>>>>start cosine similarity to triples')
cosine_to_triple(cosine_between_words, tables_vocab, nbins_cosine)
print('>>>>>start tf-idf calculation')
tf_idf_tables = tf_idf_calculation(shuffle_doc_words_list_tables, word_id_map_tables, word_doc_freq_tables,
tables_vocab, tables_vocab_size)
nbins_tfidf = 3
print('>>>>>start tf-idf to triples')
# can be time consuming
tf_idf_to_triple(tf_idf_tables, collection_to_ind_tables, tables_vocab, nbins_tfidf)
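# At this point self.triples / self.test_triples hold the accumulated tab-separated
# (head, relation, tail) lines for every relation type; presumably they are written
# out by the caller as the train/test splits of the knowledge graph.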
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/ftr_entity.py | <reponame>medtray/MultiEm-RGCN
"""
FTR Entity
==========
Implements features related to an entity.
:Author: <NAME>
"""
IREDIRECT = "!<dbo:wikiPageRedirects>" # Inverse Redirect
WIKILINKS = "<dbo:wikiPageWikiLink>"
class FtrEntity(object):
def __init__(self, en_id, entity):
self.__en_id = en_id
self.__en_doc = entity.lookup_en(en_id)
def redirects(self):
"""Number of redirect pages linking to the entity"""
reds = self.__en_doc.get(IREDIRECT, [])
return len(set(reds))
def outlinks(self):
""" Number of entity out-links"""
links = self.__en_doc.get(WIKILINKS, [])
return len(set(links))
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/elr/field_mapping.py | <reponame>medtray/MultiEm-RGCN<filename>nordlys/nordlys/logic/elr/field_mapping.py
"""
Field Mapping for ELR
=====================
Computes PRMS field mapping probabilities.
:Author: <NAME>
"""
from __future__ import division
import argparse
import json
from pprint import pprint
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.retrieval.scorer import ScorerPRMS
from nordlys.logic.elr.top_fields import TopFields
class FieldMapping(object):
DEBUG = 0
MAPPING_DEBUG = 0
def __init__(self, elastic_uri, n):
self.elastic_uri = elastic_uri
self.n_fields = n
def map(self, en_id):
"""
Gets PRMS mapping probability for a clique type
:return Dictionary {field: weight, ..}
"""
top_fields = TopFields(self.elastic_uri).get_top_term(en_id, self.n_fields)
scorer_prms = ScorerPRMS(self.elastic_uri, None, {'fields': top_fields})
field_weights = scorer_prms.get_mapping_prob(en_id)
return field_weights
def load_entities(annot_file, th=0.1):
annots = json.load(open(annot_file, "r"))
entities = set()
for qid, annot in annots.items():
for item in annot["annots"]:
if item["score"] >= th:
entities.add(item["entity"])
return entities
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="json input file", type=str)
parser.add_argument("-th", help="EL score threshold", type=float, default=0.1)
parser.add_argument("-n", help="EL score threshold", type=int, default=10)
args = parser.parse_args()
return args
def main(args):
entities = load_entities(args.input, args.th)
mapper = FieldMapping(ElasticCache("dbpedia_2015_10_uri"), args.n)
mappings = {}
i = 0
for en_id in entities:
mappings[en_id] = mapper.map(en_id)
i += 1
if i % 10 == 0:
print(i, "entities processed!")
input_file = args.input[:args.input.rfind(".")]
out_file = input_file + "_mapping" + ".json"
json.dump(mappings, open(out_file, "w"), indent=4, sort_keys=True)
print("Output file:", out_file)
if __name__ == "__main__":
main(arg_parser()) |
medtray/MultiEm-RGCN | nordlys/nordlys/logic/fusion/late_fusion_scorer.py | """
Late Fusion Scorer
==================
Class for late fusion scorer (i.e., document-centric model).
:Authors: <NAME>, <NAME>, <NAME>
"""
from nordlys.logic.fusion.fusion_scorer import FusionScorer
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.retrieval.retrieval_results import RetrievalResults
from nordlys.core.retrieval.scorer import Scorer, ScorerLM
from nordlys.core.retrieval.retrieval import Retrieval
class LateFusionScorer(FusionScorer):
def __init__(self, index_name, retr_model, retr_params, num_docs=None,
field="content", run_id="fusion", num_objs=100, assoc_mode=FusionScorer.ASSOC_MODE_BINARY,
assoc_file=None):
"""
:param index_name: name of index
:param retr_model: the retrieval model; valid values: "lm", "bm25"
:param retr_params: config including smoothing method and parameter
:param num_docs: number of documents to retrieve in the first pass
:param field: field to retrieve on
:param run_id: run ID used in the output
:param num_objs: the number of ranked objects for a query
:param assoc_mode: document-object association weight mode, binary or uniform
:param assoc_file: document-object association file
"""
super(LateFusionScorer, self).__init__(index_name, association_file=assoc_file, run_id=run_id)
self.__config = {
"index_name": self._index_name,
"first_pass": {
"num_docs": num_docs,
"field": field
},
}
self._field = field
self._num_docs = num_docs
self._model = retr_model
self._params = retr_params
self._assoc_mode = assoc_mode
self._num = num_objs
self._elastic = ElasticCache(self._index_name)
def score_query(self, query, assoc_fun=None):
"""
Scores a given query.
:param query: query string
:param assoc_fun: function that returns the list of objects associated with a document
:return: a RetrievalResults instance
"""
scorer = None
# setting the configurations
self.__config["field"] = self._field # Needed for 2nd-pass
self.__config["model"] = self._model # Needed for 2nd-pass
if self._model == "lm":
self.__config["second_pass"] = {
"field": self._field
}
for param in ["smoothing_method", "smoothing_param"]:
if self.__config.get(param, None) is not None:
self.__config["second_pass"][param] = self._params.get(param, None)
scorer = ScorerLM.get_scorer(self._elastic, query, self.__config)
else:
# print("LF config = {}".format(self.__config))
# scorer = None # scorer for default model
pass # TODO add the BM25 case body
# retrieving documents
res = Retrieval(self.__config).retrieve(query, scorer)
# getting the doc-to-object mappings
if assoc_fun is not None:
for doc_id, _ in res.items():
self.assoc_doc[doc_id] = assoc_fun(doc_id)
# scoring objects, i.e., computing P(q|o)
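# late fusion: P(q|o) = sum over retrieved docs d of score(q, d) * w(d, o),
# where w(d, o) is the association weight chosen below (binary or uniform)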
pqo = dict()
for i, item in enumerate(list(res.keys())):
if self._num_docs is not None and i >= self._num_docs:  # consider only the top num_docs documents
break
doc_id = item
doc_score = res[doc_id].get("score", 0)
if doc_id in self.assoc_doc:
for object_id in self.assoc_doc[doc_id]:
if self._assoc_mode == FusionScorer.ASSOC_MODE_BINARY:
w_do = 1
elif self._assoc_mode == FusionScorer.ASSOC_MODE_UNIFORM:
w_do = 1 / len(self.assoc_obj[object_id])
else:
w_do = 0 # this should never happen
pqo[object_id] = pqo.get(object_id, 0) + doc_score * w_do
return RetrievalResults(pqo)
|
medtray/MultiEm-RGCN | nordlys/nordlys/services/ec.py | <gh_stars>10-100
"""
Entity catalog
==============
Command line end point for entity catalog
Usage
-----
python -m nordlys.services.ec -o <operation> -i <input>
Examples
--------
- python -m nordlys.services.ec -o lookup_id -i <dbpedia:Audi_A4>
- python -m nordlys.services.ec -o "lookup_sf_dbpedia" -i "audi a4"
- python -m nordlys.services.ec -o "lookup_sf_facc" -i "audi a4"
- python -m nordlys.services.ec -o "dbpedia2freebase" -i "<dbpedia:Audi_A4>"
- python -m nordlys.services.ec -o "freebase2dbpedia" -i "<fb:m.030qmx>"
:Author: <NAME>
"""
import argparse
from pprint import pprint
from nordlys.logic.entity.entity import Entity
OPERATIONS = {"lookup_id", "lookup_sf_dbpedia", "lookup_sf_facc", "freebase2dbpedia", "dbpedia2freebase"}
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--operation", help="Name of operation", choices=OPERATIONS)
parser.add_argument("-i", "--input", help="input entity id/name", type=str)
args = parser.parse_args()
return args
def main(args):
en = Entity()
if args.operation == "lookup_id":
res = en.lookup_en(args.input)
elif args.operation == "lookup_sf_dbpedia":
res = en.lookup_name_dbpedia(args.input)
elif args.operation == "lookup_sf_facc":
res = en.lookup_name_facc(args.input)
elif args.operation == "freebase2dbpedia":
res = en.fb_to_dbp(args.input)
elif args.operation == "dbpedia2freebase":
res = en.dbp_to_fb(args.input)
pprint(res)
if __name__ == "__main__":
main(arg_parser())
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/query/mention.py | """
Mention
=======
Class for entity mentions (used for entity linking)
- Generates all candidate entities for a mention
- Computes commonness for mention-entity pairs
"""
import sys
from pprint import pprint
from nordlys.logic.entity.entity import Entity
class Mention(object):
def __init__(self, mention, entity, cmns_th=None):
self.__mention = mention.lower()
self.__entity = entity
self.__cmns_th = cmns_th
def get_cand_ens(self):
"""Returns all candidate entities for the mention
:return: {en:cmn_score}
"""
facc_matches = self.__get_facc_matches(self.__entity.lookup_name_facc(self.__mention))
cand_ens = self.__filter_uncommon_ens(facc_matches) if self.__cmns_th else facc_matches
dbpedia_matches = self.__get_dbpedia_matches(self.__entity.lookup_name_dbpedia(self.__mention))
for en_id in dbpedia_matches:
if en_id not in facc_matches:
cand_ens[en_id] = 0
else:
cand_ens[en_id] = facc_matches[en_id]
return cand_ens
def __get_dbpedia_matches(self, matches):
"""Returns list of DBpedia matches."""
dbp_ens = []
for field, match in matches.items():
if field == "_id":
continue
dbp_ens += list(match.keys())
return set(dbp_ens)
def __get_facc_matches(self, matches):
"""Returns entities matching the mention according to FACC.
- Computes commonness for each entity (if needed)
- Converts Freebase IDs to DBpedia
"""
# computes commonness for each matching entity (if a threshold is set)
facc_matches = matches.get("facc12", {})
if self.__cmns_th:
facc_matches = self.__get_commonness_scores(facc_matches)
# converts Freebase IDs to DBpedia IDs
facc_ens = {}
for entity_id, val in facc_matches.items():
dbp_ids = self.__entity.fb_to_dbp(entity_id)
if dbp_ids is None:
continue
for dbp_id in dbp_ids:
facc_ens[dbp_id] = val
return facc_ens
def __get_commonness_scores(self, en_counts):
"""Computes commonness score for a all entities matching the mention.
:param en_counts: dictionary {entity_id: count, ...}
:return: commonness scores {entity_id: commonness, ...}
"""
commonness_scores = {}
total_occurrences = sum(en_counts.values())
for en, count in en_counts.items():
commonness_scores[en] = count / total_occurrences
return commonness_scores
def __filter_uncommon_ens(self, en_cmns):
"""Filters out entities that are below the commonness threshold.
:param en_cmns: dictionary {entity_id: count, ...}
:return: filtered dictionary
"""
filtered_ens = {}
for en, cmns in en_cmns.items():
if cmns >= self.__cmns_th:
filtered_ens[en] = cmns
return filtered_ens
def main(args):
entity = Entity()
mention = Mention(args[0], entity, cmns_th=0.1)
ens = mention.get_cand_ens()
print(ens)
if __name__ == "__main__":
main(sys.argv[1:]) |
medtray/MultiEm-RGCN | nordlys/nordlys/logic/er/scorer_elr.py | """
ELR Scorer
==========
Scorer for MRF based models. Supports FSDM and ELR-based models.
:Author: <NAME>
"""
from __future__ import division
import argparse
import json
import math
from collections import defaultdict
from pprint import pprint
from nordlys.core.ml.instance import Instance
from nordlys.core.ml.instances import Instances
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.retrieval.scorer import ScorerPRMS
from nordlys.core.utils.entity_utils import EntityUtils
from nordlys.core.utils.file_utils import FileUtils
from nordlys.logic.elr.top_fields import TopFields
class ScorerELR(object):
DEBUG = 0
def __init__(self, elastic_uri, query_annot, query_len, params, n_fields=10):
self.elastic_uri = elastic_uri
self.query_annot = query_annot
self.query_len = query_len
self.n_fields = n_fields
self.lambda_T = params[0]
self.lambda_E = params[1]
self.E = self.normalize_el_scores(query_annot)
@staticmethod
def normalize_el_scores(scores):
"""Normalize entity linking score, so that sum of all scores equal to 1"""
normalized_scores = {}
sum_score = sum(scores.values())
for item, score in scores.items():
normalized_scores[item] = score / sum_score
return normalized_scores
def get_field_weights(self, uri):
"""
Gets PRMS mapping probability for a clique type
:return Dictionary {field: weight, ..}
"""
print("Computing field weights ...")
# top_fields = TopFields(self.elastic_uri).get_top_term(uri, self.n_fields)
top_fields = ["catchall"]
scorer_prms = ScorerPRMS(self.elastic_uri, None, {'fields': top_fields})
field_weights = scorer_prms.get_mapping_prob(uri)
return field_weights
def get_uri_prob(self, doc_id, field, e, lambd=0.1):
"""
P(e|d_f) = P(e|d_f)= (1 - lambda) tf(e, d_f)+ lambda df(f, e) / df(f)
:param doc_id: document id
:param field: field name
:param e: entity uri
:param lambd: smoothing parameter
:return: P(e|d_f)
"""
if self.DEBUG:
print("\t\tf:", field)
tf = self.elastic_uri.term_freqs(doc_id, field)
tf_e_d_f = 1 if tf.get(e, 0) > 0 else 0
df_f_e = self.elastic_uri.doc_freq(e, field)
df_f = self.elastic_uri.doc_count(field)
p_e_d_f = ((1 - lambd) * tf_e_d_f) + (lambd * df_f_e / df_f)
if self.DEBUG:
print("\t\t\ttf(e,d_f):", tf_e_d_f, "df(f, e):", df_f_e, "df(f):", df_f, "P(e|d_f):", p_e_d_f)
return p_e_d_f
def get_p_e_d(self, e, field_weights, doc_id):
"""
p(e|d) = sum_{f in F} p(e|d_f) p(f|e)
:param e: entity URI
:param field_weights: Dictionary {f: p_f_t, ...}
:param doc_id: entity id
:return p(e|d)
"""
if self.DEBUG:
print("\te:", e)
p_e_d = 0
for f, p_f_e in field_weights.items():
p_e_d_f = self.get_uri_prob(doc_id, f, e)
p_e_d += p_e_d_f * p_f_e
if self.DEBUG:
print("\t\tp(e|d_f):", p_e_d_f, "p(f|e):", p_f_e, "p(e|d_f).p(f|e):", p_e_d_f * p_f_e)
if self.DEBUG:
print("\tp(e|d):", p_e_d)
return p_e_d
def score_doc(self, doc_id, field_mappings=None):
"""
Computes the entity-based component P(E|d) = sum_{e} P(e) log P(e|d); the final
score P(q|d) = lambda_T * P(T|d) + lambda_E * P(E|d) is combined by the caller.
:param doc_id: document id
:param field_mappings: precomputed PRMS field mappings per entity (optional)
:return: P(E|d)
"""
if self.DEBUG:
print("Scoring doc ID=" + doc_id)
p_E_d = 0
if self.lambda_E != 0:
for e, score in self.E.items():
field_weights = field_mappings[e] if field_mappings else self.get_field_weights(e)
catchall_weight = {"catchall": field_weights.get("catchall", 0)}
p_e_d = self.get_p_e_d(e, catchall_weight, doc_id)
if p_e_d != 0:
p_E_d += score * math.log(p_e_d)
return p_E_d
# p_T_d = term_sim / self.query_len
# p_q_d = (self.lambda_T * p_T_d) + (self.lambda_E * p_E_d)
# if self.DEBUG:
# print("\t\tp(E|d):", p_E_d)
#
# # Adds the current doc to instances
# # self.gen_ins(doc_id, {"p_T_d": p_T_d, "p_E_d": p_E_d})
# return p_q_d
def load_run(run_file):
"""Loads a run file to the memory
:param run_file: A trec run file
:return {query: {enId: score, ..}, ...}
"""
run = defaultdict(dict)
with open(run_file, "r") as f:
for line in f:
cols = line.strip().split()
run[cols[0]][cols[2]] = float(cols[4])
return run
def load_annot(annot_file, th=0.1):
"""Reads TAGME annotation file and generates a dictionary
:return {qid: {en_id: score, ..}, ..}
"""
annots = defaultdict(dict)
tagme_annots = json.load(open(annot_file, "r"))
for qid, annot in tagme_annots.items():
for item in annot["annots"]:
if item["score"] >= th:
en_id = item["entity"]
annots[qid][en_id] = item["score"]
return annots
def get_mapping_query(query_annots, mappings):
query_mappings = {}
for en_id in query_annots:
query_mappings[en_id] = mappings[en_id]
return query_mappings
def trec_format(results, query_id, run_id):
"""Outputs results in TREC format"""
out_str = ""
rank = 1
for doc_id, score in sorted(results.items(), key=lambda x: x[1], reverse=True):
out_str += query_id + "\tQ0\t" + doc_id + "\t" + str(rank) + "\t" + str(score) + "\t" + run_id + "\n"
rank += 1
return out_str
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="config file", type=str)
args = parser.parse_args()
return args
def main(args):
config = FileUtils.load_config(args.config)
elastic_term = ElasticCache(config["text_index"])
lambdas = config.get("lambdas", [0.9, 0.1])
queries = json.load(open(config["query_file"], "r"))
mappings = json.load(open(config["mapping_file"], "r"))
annots = load_annot(config["annot_file"])
run = load_run(config["run_file"])
instances = Instances()
# gets the results
out_file = open(config["output_file"], "w")
qid_int = 0
for qid, query in sorted(queries.items()):
print("Scoring ", qid, "...")
results, libsvm_str = {}, ""
query_len = len(elastic_term.analyze_query(query).split())
scorer = ScorerELR(ElasticCache(config["uri_index"]), annots[qid], query_len, lambdas)
for doc_id, p_T_d in sorted(run[qid].items()):
query_mappings = get_mapping_query(annots[qid], mappings)
p_E_d = scorer.score_doc(doc_id, query_mappings)
properties = {'doc_id': doc_id, 'query': query, 'qid': qid, 'qid_int': qid_int}
features = {'p_T_d': p_T_d, 'p_E_d': p_E_d}
ins = Instance(qid + "_" + doc_id, features=features, properties=properties)
instances.add_instance(ins)
# libsvm_str += ins.to_libsvm(qid_prop="qod_int")
results[doc_id] = (lambdas[0] * p_T_d) + (lambdas[1] * p_E_d)
qid_int += 1
# Write trec format
out_str = trec_format(results, qid, "elr")
out_file.write(out_str)
out_file.close()
print("Output file:", config["output_file"])
instances.to_json(config["json_file"])
print("Output file:", config["json_file"])
if __name__ == "__main__":
main(arg_parser())
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/elastic_cache.py | """
Elastic Cache
=============
This is a cache for elastic index stats; a layer between an index and retrieval.
The statistics (such as document and term frequencies) are first read from the index and stay in the memory for further
usages.
Usage hints
-----------
- Only one instance of Elastic cache needs to be created.
- If running out of memory, you need to create a new object of ElasticCache.
- The class also caches termvectors. To further boost efficiency, you can load term vectors for multiple documents using :func:`ElasticCache.multi_termvector`.
:Author: <NAME>
"""
from collections import defaultdict
from nordlys.core.retrieval.elastic import Elastic
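# Minimal usage sketch (the index name is illustrative):
#   ec = ElasticCache("dbpedia_2015_10_uri")
#   ec.multi_termvector(["<dbpedia:Audi_A4>", "<dbpedia:Audi_A6>"], "catchall")  # warm the term-vector cache
#   tf = ec.term_freq("<dbpedia:Audi_A4>", "catchall", "audi")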
class ElasticCache(Elastic):
def __init__(self, index_name):
super(ElasticCache, self).__init__(index_name)
# Cached variables
self.__num_docs = None
self.__num_fields = None
self.__doc_count = {}
self.__coll_length = {}
self.__avg_len = {}
self.__doc_length = defaultdict(dict)
self.__doc_freq = defaultdict(dict)
self.__coll_term_freq = defaultdict(dict)
self.__tv = defaultdict(dict)
self.__coll_tv = defaultdict(dict)
def __get_termvector(self, doc_id, field):
"""Returns a term vector for a given document and field."""
if self.__coll_tv.get(doc_id, {}).get(field, None):
return self.__coll_tv[doc_id][field]
if self.__tv.get(doc_id, {}).get(field, None) is None:
self.__tv[doc_id][field] = self._get_termvector(doc_id, field)
return self.__tv[doc_id][field]
def __get_coll_termvector(self, term, field):
"""Returns a term vector containing collection stats of a term."""
body = {"query": {"bool": {"must": {"term": {field: term}}}}}
hits = self.search_complex(body, num=1)
doc_id = next(iter(hits.keys())) if len(hits) > 0 else None
if self.__coll_tv.get(doc_id, {}).get(field, None) is None:
self.__coll_tv[doc_id][field] = self._get_termvector(doc_id, field, term_stats=True) if doc_id else {}
return self.__coll_tv[doc_id][field]
def num_docs(self):
"""Returns the number of documents in the index."""
if self.__num_docs is None:
self.__num_docs = super(ElasticCache, self).num_docs()
return self.__num_docs
def num_fields(self):
"""Returns number of fields in the index."""
if self.__num_fields is None:
self.__num_fields = super(ElasticCache, self).num_fields()
return self.__num_fields
def doc_count(self, field):
"""Returns number of documents with at least one term for the given field."""
if field not in self.__doc_count:
self.__doc_count[field] = super(ElasticCache, self).doc_count(field)
return self.__doc_count[field]
def coll_length(self, field):
"""Returns length of field in the collection."""
if field not in self.__coll_length:
self.__coll_length[field] = super(ElasticCache, self).coll_length(field)
return self.__coll_length[field]
def avg_len(self, field):
"""Returns average length of a field in the collection."""
if field not in self.__avg_len:
self.__avg_len[field] = super(ElasticCache, self).avg_len(field)
return self.__avg_len[field]
def doc_length(self, doc_id, field):
"""Returns length of a field in a document."""
if self.__doc_length.get(doc_id, {}).get(field, None) is None:
self.__doc_length[doc_id][field] = sum(self.term_freqs(doc_id, field).values())
return self.__doc_length[doc_id][field]
def doc_freq(self, term, field, tv=None):
"""Returns document frequency for the given term and field."""
if self.__doc_freq.get(field, {}).get(term, None) is None:
tv = self.__get_coll_termvector(term, field)
self.__doc_freq[field][term] = super(ElasticCache, self).doc_freq(term, field, tv=tv)
return self.__doc_freq[field][term]
def coll_term_freq(self, term, field, tv=None):
""" Returns collection term frequency for the given field."""
if self.__coll_term_freq.get(field, {}).get(term, None) is None:
tv = self.__get_coll_termvector(term, field)
self.__coll_term_freq[field][term] = super(ElasticCache, self).coll_term_freq(term, field, tv=tv)
return self.__coll_term_freq[field][term]
def term_freqs(self, doc_id, field, tv=None):
"""Returns term frequencies for a given document and field."""
tv = self.__get_termvector(doc_id, field)
return super(ElasticCache, self).term_freqs(doc_id, field, tv)
def term_freq(self, doc_id, field, term):
"""Returns frequency of a term in a given document and field."""
return self.term_freqs(doc_id, field).get(term, 0)
def multi_termvector(self, doc_ids, field, batch=50):
"""Returns term vectors for a given document and field."""
i = 0
while i < len(doc_ids):
j = i + batch if i + batch <= len(doc_ids) else len(doc_ids)
tvs = self._get_multi_termvectors(doc_ids[i:j], field)
for doc_id, tv in tvs.items():
if tv != {}:
self.__tv[doc_id][field] = tv
i += batch
|
medtray/MultiEm-RGCN | LTR/prepare_embedding.py | import sys
import os
import argparse
cwd = os.getcwd()
from pathlib import Path
path = Path(cwd)
sys.path.append(os.path.join(path.parent.absolute()))
from collections import Counter
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import Dataset
from random import randint
import json
import pandas as pd
from utils_ import *
from sklearn import preprocessing
from dgl.nn.pytorch import RelGraphConv
from load_model_for_testing import *
parser = argparse.ArgumentParser(description='Compute embeddings for MultiEm-RGCN', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--device', type=int, default=0)
parser.add_argument("--n-hidden", type=int, default=100,
help="number of hidden units")
parser.add_argument("--n-bases", type=int, default=10,
help="number of weight blocks for each relation")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("--dataset", type=str, default='wikiTables',
help="dataset to use")
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout probability")
parser.add_argument("--regularization", type=float, default=0.01,
help="regularization weight")
parser.add_argument("--grad-norm", type=float, default=1.0,
help="norm to clip gradient to")
args = parser.parse_args()
#torch.cuda.set_device(args.device)
#args.device='cuda:'+str(args.device)
args.use_cuda=False
cwd=os.getcwd()
parent_path=Path(cwd).parent
args.parent_path=parent_path
data = RGCNLinkDataset(args.dataset)
data.dir = os.path.join(args.parent_path, args.dataset)
dir_base = data.dir
print(dir_base)
data.load()
num_nodes = data.num_nodes
train_data = data.train
num_rels = data.num_rels
# check cuda
use_cuda = args.use_cuda
if use_cuda:
torch.cuda.set_device(args.device)
# create model
model = LinkPredict(num_nodes,
args.n_hidden,
num_rels,
num_bases=args.n_bases,
num_hidden_layers=args.n_layers,
dropout=args.dropout,
use_cuda=use_cuda,
reg_param=args.regularization)
# build test graph
test_graph, test_rel, test_norm,_ = utils.build_test_graph(
num_nodes, num_rels, train_data)
#model_state_file = './wikiTables/model_state.pth'
model_state_file = os.path.join(dir_base, 'model_state.pth')
model = load_checkpoint_for_eval(model, model_state_file,args.device)
test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)
test_rel = torch.from_numpy(test_rel)
test_norm = node_norm_to_edge_norm(test_graph, torch.from_numpy(test_norm).view(-1, 1))
wv={}
model.cpu()
model.eval()
print('start calculating embedding')
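# a single full-graph forward pass produces the final node-embedding matrix
# (num_nodes x n_hidden); rows are indexed by the entity ids from entities.dict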
embed = model(test_graph, test_node_id, test_rel, test_norm)
print('finish calculating embedding')
nb_tokens,dim=embed.shape
entities_file = os.path.join(dir_base, 'entities.dict')
entities=open(entities_file,'r')
lines=entities.readlines()
with tqdm(total=len(lines)) as pbar0:
for line in lines:
line=line.strip()
line=line.split('\t')
wv[line[1]]=embed[int(line[0])].tolist()
#print(line)
pbar0.update(1)
wv['unk'] = np.random.rand(dim).tolist()
wv['.'] = np.random.rand(dim).tolist()
wv[','] = np.random.rand(dim).tolist()
word_to_index = {}
index_to_word = []
for i,key in enumerate(wv.keys()):
word_to_index[key]=i
index_to_word.append(key)
torch.save(embed,os.path.join(dir_base, 'wv.pt'))
print('start saving')
np.save(os.path.join(dir_base, 'wv.npy'),wv)
print('finish saving')
np.save(os.path.join(dir_base, 'word_to_index.npy'),word_to_index)
np.save(os.path.join(dir_base, 'index_to_word.npy'),index_to_word)
|
medtray/MultiEm-RGCN | LTR/kfolds_joint_embedding.py | <filename>LTR/kfolds_joint_embedding.py<gh_stars>0
import torch.nn.functional as F
import torch
from torch import nn
from collections import OrderedDict
from torch.autograd import Variable
from torch.utils.data import Dataset,DataLoader
#device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device='cuda'
from joint_model import JointModel
from data_reader_jm import DataAndQueryJM
import os
import numpy as np
import torch.nn.functional as F
import pandas as pd
import subprocess
from matplotlib import pyplot as plt
from sklearn.model_selection import KFold
import random
import argparse
import sys
cwd = os.getcwd()
from pathlib import Path
path = Path(cwd)
sys.path.append(os.path.join(path.parent.absolute()))
parser = argparse.ArgumentParser(description='Train MultiEm-RGCN', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--emsize', type=int, default=100)
parser.add_argument('--max_query_len', type=int, default=150)
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--filters_1d', type=int, nargs='+', default=[100], help='Number of filters for each 1D conv layer.')
parser.add_argument('--kernel_size_1d', type=int, nargs='+', default=[5], help='Kernel size for each 1D conv layer.')
parser.add_argument('--maxpool_size_1d', type=int, nargs='+', default=[2], help='Max-pooling size for each 1D conv layer.')
parser.add_argument('--nbins', type=int, default=5)
parser.add_argument("--n-hidden", type=int, default=100,
help="number of hidden units")
parser.add_argument("--n-bases", type=int, default=10,
help="number of weight blocks for each relation")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("--dataset", type=str, default='wikiTables',
help="dataset to use")
parser.add_argument("--dropout", type=float, default=0.2,
help="dropout probability")
parser.add_argument("--regularization", type=float, default=0.01,
help="regularization weight")
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225], help='Decrease learning rate at these epochs.')
parser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1], help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')
args = parser.parse_args()
print(torch.cuda.current_device())
torch.cuda.set_device(args.device)
print(torch.cuda.current_device())
print(torch.cuda.get_device_name(args.device))
print(torch.cuda.is_available())
args.device='cuda:'+str(args.device)
args.use_cuda=True
# args.use_cuda=False
# args.device='cpu'
out_str = str(args)
print(out_str)
loss_function=nn.MSELoss()
import os
from pathlib import Path
def kernel_mus(n_kernels):
"""
get the mu for each gaussian kernel. Mu is the middle of each bin
:param n_kernels: number of kernels (including exact match). first one is exact match
:return: l_mu, a list of mu.
"""
l_mu = [1]
if n_kernels == 1:
return l_mu
bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]
l_mu.append(1 - bin_size / 2) # mu: middle of the bin
for i in range(1, n_kernels - 1):
l_mu.append(l_mu[i] - bin_size)
return l_mu
def kernel_sigmas(n_kernels):
"""
get sigmas for each gaussian kernel.
:param n_kernels: number of kernels (including exact match)
:return: l_sigma, a list of sigma
"""
bin_size = 2.0 / (n_kernels - 1)
l_sigma = [0.001] # for exact match. small variance -> exact match
if n_kernels == 1:
return l_sigma
l_sigma += [0.1] * (n_kernels - 1)
return l_sigma
args.mu = kernel_mus(args.nbins)
args.sigma = kernel_sigmas(args.nbins)
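# e.g. nbins=5 -> mu = [1, 0.75, 0.25, -0.25, -0.75], sigma = [0.001, 0.1, 0.1, 0.1, 0.1]
# (one near-exact-match kernel plus evenly spaced soft-match kernels over the cosine range [-1, 1])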
cwd = os.getcwd()
parent_path=Path(cwd).parent
args.parent_path=parent_path
data_folder=os.path.join(parent_path,args.dataset)
text_file = open(os.path.join(data_folder,"qrels.txt"), "r")
lines = text_file.readlines()
queries_id_qrels = []
list_lines_qrels = []
for line in lines:
line = line.rstrip('\n')
aa = line.split('\t')
queries_id_qrels += [aa[0]]
list_lines_qrels.append(aa)
def load_checkpoint(model, optimizer, losslogger, filename):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
losslogger = checkpoint['losslogger']
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model, optimizer, start_epoch, losslogger
def load_checkpoint_for_eval(model, filename):
# Note: Input model & optimizer should be pre-defined. This routine only updates their states.
start_epoch = 0
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(filename))
return model
def read_file_for_nfcg(file):
text_file = open(file, "r")
lines = text_file.readlines()
queries_id = []
list_lines = []
for line in lines:
line = line.rstrip('\n')
aa = line.split('\t')
queries_id += [aa[0]]
list_lines.append(aa)
inter = np.array(list_lines)
return inter
def test_output(test_iter, model):
#model = load_checkpoint_for_eval(model, save_path)
# move the model to GPU if has one
model=model.to(args.device)
# need this for dropout
model.eval()
epoch_loss = 0
num_batches = len(test_iter)
all_outputs = []
all_labels=[]
for batch_desc_w,batch_att_w,batch_query_w,batch_desc_wn,batch_att_wn,batch_query_wn,batch_desc_QTE,batch_query_QTE,\
labels,batch_semantic in test_iter:
batch_desc_w, batch_att_w, batch_query_w, labels,batch_semantic = batch_desc_w.to(args.device), batch_att_w.to(args.device),\
batch_query_w.to(args.device), labels.to(args.device), batch_semantic.to(args.device)
batch_desc_wn, batch_att_wn, batch_query_wn = batch_desc_wn.to(args.device), batch_att_wn.to(args.device), \
batch_query_wn.to(args.device)
batch_desc_QTE, batch_query_QTE = batch_desc_QTE.to(args.device),batch_query_QTE.to(args.device)
batch_query_w = torch.squeeze(batch_query_w)
batch_desc_w = torch.squeeze(batch_desc_w)
batch_att_w = torch.squeeze(batch_att_w)
batch_query_wn = torch.squeeze(batch_query_wn)
batch_desc_wn = torch.squeeze(batch_desc_wn)
batch_att_wn = torch.squeeze(batch_att_wn)
batch_query_QTE = torch.squeeze(batch_query_QTE)
batch_desc_QTE = torch.squeeze(batch_desc_QTE)
batch_desc_w = torch.cat([batch_desc_w, batch_att_w], 1)
batch_desc_wn = torch.cat([batch_desc_wn, batch_att_wn], 1)
outputs = model(batch_query_w,batch_desc_w,batch_query_wn,batch_desc_wn,batch_query_QTE, batch_desc_QTE,batch_semantic).to(args.device)
# print(outputs)
# labels=torch.FloatTensor(labels)
#labels = labels / 2
loss = loss_function(outputs, labels.float())
#loss = listnet_loss(labels.float(), outputs)
epoch_loss += loss.item()
all_outputs += outputs.tolist()
all_labels += labels.tolist()
losslogger = epoch_loss / num_batches
#print(f'Testing loss = {losslogger}')
return all_outputs,losslogger,all_labels
def calculate_metrics(inter, output_file,all_outputs,ndcg_file):
inter2 = []
for jj, item in enumerate(inter):
item_inter = [i for i in item]
item_inter[4] = str(all_outputs[jj])
inter2.append(item_inter)
inter3 = np.array(inter2)
np.savetxt(output_file, inter3, fmt="%s")
#batcmd = "./trec_eval -m ndcg_cut.5 "+ndcg_file+" " + output_file
batcmd = "./trec_eval -m map " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
map = float(res[2])
batcmd = "./trec_eval -m recip_rank " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
mrr = float(res[2])
batcmd = "./trec_eval -m ndcg_cut.5 " + ndcg_file + " " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
ndcg = float(res[2])
return ndcg,map,mrr
def calculate_ndcg(inter, output_file,all_outputs,ndcg_file):
inter2 = []
for jj, item in enumerate(inter):
item_inter = [i for i in item]
item_inter[4] = str(all_outputs[jj])
inter2.append(item_inter)
inter3 = np.array(inter2)
np.savetxt(output_file, inter3, fmt="%s")
batcmd = "./trec_eval -m ndcg_cut.5 "+ndcg_file+" " + output_file
result = subprocess.check_output(batcmd, shell=True, encoding='cp437')
res = result.split('\t')
ndcg = float(res[2])
return ndcg
def qrel_for_data(data,list_lines_qrels,output_file):
#list_lines_qrels=np.array(list_lines_qrels)
df = pd.DataFrame(list_lines_qrels)
qrel_inter=[]
for i in range(len(data)):
row=data[i]
ii=df[((df[0] == row[0]) & (df[2] == row[2]))]
qrel_inter+=ii.values.tolist()
qrel_inter=np.array(qrel_inter)
np.savetxt(output_file, qrel_inter, fmt="%s",delimiter='\t')
def listnet_loss(y_i, z_i):
"""
y_i: (n_i, 1)
z_i: (n_i, 1)
"""
P_y_i = F.softmax(y_i, dim=0)
P_z_i = F.softmax(z_i, dim=0)
return - torch.sum(y_i * torch.log(P_z_i))
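# A quick sanity check of listnet_loss (illustrative values only):
#   y = torch.tensor([[3.0], [1.0], [0.0]])  # ground-truth relevance
#   z = torch.tensor([[2.5], [0.7], [0.1]])  # model scores
#   listnet_loss(y, z)  # small when the predicted ranking matches the labels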
loss_function=nn.MSELoss()
batch_size=50
kfold = KFold(n_splits=5, shuffle=True, random_state=None)
data=read_file_for_nfcg(os.path.join(data_folder,"all.txt"))
#all_ind=np.arange(len(data))
#random.shuffle(all_ind)
#data=data[all_ind]
NUM_EPOCH=5
start_epoch=0
d_dropout = 0.2
final_results=[]
final_results_map=[]
final_results_mrr=[]
for _ in range(3):
all_test_max_ndcg = []
all_test_max_map = []
all_test_max_mrr = []
split_id = 0
for train, test in kfold.split(data):
ndcg_train = []
ndcg_test = []
split_id+=1
output_qrels_train='qrels_train'+str(split_id)+'.txt'
qrel_for_data(data[train], list_lines_qrels, output_qrels_train)
output_qrels_test = 'qrels_test'+str(split_id)+'.txt'
qrel_for_data(data[test], list_lines_qrels, output_qrels_test)
train_file_name = './train1_'+str(split_id)+'.txt'
np.savetxt(train_file_name, data[train], fmt="%s",delimiter='\t')
test_file_name = './test1_'+str(split_id)+'.txt'
np.savetxt(test_file_name, data[test], fmt="%s",delimiter='\t')
output_train_ndcg = './train1_ndcg_'+str(split_id)+'.txt'
output_test_ndcg = './test1_ndcg_'+str(split_id)+'.txt'
train_dataset = DataAndQueryJM(train_file_name,None,None,None,output_train_ndcg,args)
print(len(train_dataset))
train_iter = DataLoader(train_dataset, batch_size = batch_size, shuffle = False)
args.index_to_word=train_dataset.index_to_word
args.wv=train_dataset.wv
model = JointModel(args).to(args.device)
optimizer = torch.optim.Adam(model.parameters(),lr=0.01,weight_decay=1e-8)
#optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-8)
losslogger=np.inf
save_path = './model2.pt'
#model, optimizer, start_epoch, losslogger=load_checkpoint(model, optimizer, losslogger, save_path)
test_dataset = DataAndQueryJM(test_file_name, train_dataset.wv, train_dataset.word_to_index,
train_dataset.index_to_word,output_test_ndcg,args)
print(len(test_dataset))
test_iter = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
inter_train = read_file_for_nfcg(output_train_ndcg)
inter_test = read_file_for_nfcg(output_test_ndcg)
all_att_w = train_dataset.all_att_w
all_desc_w = train_dataset.all_desc_w
all_query_w = train_dataset.all_query_w
all_att_wn = train_dataset.all_att_wn
all_desc_wn = train_dataset.all_desc_wn
all_query_wn = train_dataset.all_query_wn
all_desc_QTE = train_dataset.all_desc_QTE
all_query_QTE = train_dataset.all_query_QTE
all_semantic = train_dataset.all_semantic
all_query_labels = train_dataset.all_query_labels
all_labels = np.array(train_dataset.labels)
dict_label_pos = {}
for l in range(1, 61):
dict_label_pos[l] = [i for i, num in enumerate(all_query_labels) if num == l]
loss_train=[]
loss_test=[]
max_test_ndcg=0
max_test_map = 0
max_test_mrr = 0
for epoch in range(start_epoch, NUM_EPOCH + start_epoch):
model.train()
epoch_loss = 0
# num_batches = len(train_iter)
num_batches = 60  # one batch per query id (labels 1..60)
all_outputs = []
for l in range(1, 61):
if len(dict_label_pos[l]) > 0:
batch_desc_w = all_desc_w[dict_label_pos[l]].to(args.device)
batch_att_w = all_att_w[dict_label_pos[l]].to(args.device)
batch_query_w = all_query_w[dict_label_pos[l]].to(args.device)
batch_desc_wn = all_desc_wn[dict_label_pos[l]].to(args.device)
batch_att_wn = all_att_wn[dict_label_pos[l]].to(args.device)
batch_query_wn = all_query_wn[dict_label_pos[l]].to(args.device)
batch_desc_QTE = all_desc_QTE[dict_label_pos[l]].to(args.device)
batch_query_QTE = all_query_QTE[dict_label_pos[l]].to(args.device)
batch_semantic = all_semantic[dict_label_pos[l]].to(args.device)
labels = torch.tensor(all_labels[dict_label_pos[l]])
labels = labels.to(args.device)
batch_query_w=torch.squeeze(batch_query_w)
batch_desc_w = torch.squeeze(batch_desc_w)
batch_att_w = torch.squeeze(batch_att_w)
batch_query_wn = torch.squeeze(batch_query_wn)
batch_desc_wn = torch.squeeze(batch_desc_wn)
batch_att_wn = torch.squeeze(batch_att_wn)
batch_query_QTE = torch.squeeze(batch_query_QTE)
batch_desc_QTE = torch.squeeze(batch_desc_QTE)
batch_desc_w=torch.cat([batch_desc_w,batch_att_w],1)
batch_desc_wn = torch.cat([batch_desc_wn, batch_att_wn], 1)
outputs = model(batch_query_w,batch_desc_w,batch_query_wn,batch_desc_wn,batch_query_QTE,batch_desc_QTE
,batch_semantic).to(args.device)
# labels=torch.FloatTensor(labels)
all_outputs += outputs.tolist()
# labels=labels/2
# loss = loss_function(outputs, labels.float()).to(device)
loss = listnet_loss(labels.float(), outputs).to(args.device)
epoch_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
losslogger = epoch_loss / num_batches
#print(f'Training epoch = {epoch + 1}, epoch loss = {losslogger}')
# train_ndcg = calculate_ndcg(inter_train, 'scores.txt', all_outputs, 'qrels.txt')
train_ndcg = calculate_ndcg(inter_train, 'scores.txt', all_outputs, output_qrels_train)
ndcg_train.append(train_ndcg)
#print(ndcg_train)
outputs_test, testing_loss, _ = test_output(test_iter, model)
# test_ndcg = calculate_ndcg(inter_test, 'scores.txt', outputs_test, 'qrels.txt')
test_ndcg2 = calculate_ndcg(inter_test, 'scores.txt', outputs_test, output_qrels_test)
test_ndcg,test_map,test_mrr = calculate_metrics(inter_test, 'scores.txt', outputs_test, output_qrels_test)
if test_ndcg>max_test_ndcg:
max_test_ndcg=test_ndcg
# ndcg_test.append(test_ndcg)
# print(max_test_ndcg)
#
# if test_map>max_test_map:
# max_test_map=test_map
#
# if test_mrr>max_test_mrr:
# max_test_mrr=test_mrr
loss_train.append(losslogger)
loss_test.append(testing_loss)
#print(model.SRscore.weight)
all_test_max_ndcg.append(test_ndcg)
print(all_test_max_ndcg)
all_test_max_map.append(test_map)
#print(all_test_max_map)
all_test_max_mrr.append(test_mrr)
#print(all_test_max_mrr)
final_results+=all_test_max_ndcg
final_results_map += all_test_max_map
final_results_mrr += all_test_max_mrr
print('final results \n')
print(final_results)
print(len(final_results))
print('mean ndcg={}'.format(np.mean(final_results)))
print('std ndcg={}'.format(np.std(final_results)))
print(final_results_map)
print(len(final_results_map))
print('mean map={}'.format(np.mean(final_results_map)))
print('std map={}'.format(np.std(final_results_map)))
print(final_results_mrr)
print(len(final_results_mrr))
print('mean mrr={}'.format(np.mean(final_results_mrr)))
print('std mrr={}'.format(np.std(final_results_mrr)))
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/ml/cross_validation.py | """
Cross Validation
----------------
Cross-validation support.
We assume that instances (i) are uniquely identified by an instance ID and (ii) they have id and score properties.
We access them using the Instances class.
:Authors: <NAME>, <NAME>
"""
from os.path import isfile
import json
from random import shuffle
from nordlys.core.ml.instances import Instances
class CrossValidation(object):
"""
Class attributes:
folds: dict of folds (0..k-1), where each fold is a dict
{"training": [list of instance_ids], "testing": [list of instance_ids]}
"""
def __init__(self, k, instances, callback_train, callback_test):
"""
:param k: number of folds
:param instances: Instances object
:param callback_train: Callback function for training model
:param callback_test: Callback function for applying model
"""
self.__k = k
self.__instances = instances
self.folds = None
self.callback_train = callback_train
self.callback_test = callback_test
def create_folds(self, group_by=None):
"""
Creates folds for the data set.
:param group_by: property to group by (instance_id by default)
"""
if group_by is not None:
# instances are grouped by the value of a given property
inss_dict = self.__instances.group_by_property(group_by)
else:
# each instance is a group on its own
inss_dict = {}
for ins in self.__instances.get_all():
inss_dict[ins.id] = [ins]
# shuffling
inss_keys = list(inss_dict.keys())
shuffle(inss_keys)
# determines the number of folds
num_folds = len(inss_keys) if self.__k == -1 else self.__k
# creates folds
self.folds = {}
for f in range(num_folds):
print("Generating fold " + str(f + 1) + "/" + str(num_folds))
fold = {"training": [], "testing": []}
for i, key in enumerate(inss_keys):
w = "testing" if i % num_folds == f else "training"
ins_ids = [ins.id for ins in inss_dict[key]]
fold[w] += ins_ids
self.folds[f] = fold
def get_instances(self, i, mode, property=None):
"""
Returns instances from the given fold i \in [0..k-1].
:param i: fold number
:param mode: training or testing
:param property: optional property to group instances by
:return: Instances object
"""
inss = Instances()
if property:
inss_by_prop = self.__instances.group_by_property(property)
for l in self.folds[i][mode]:
for ins in inss_by_prop[l]:
inss.add_instance(ins)
else:
for l in self.folds[i][mode]:
inss.add_instance(self.__instances.get_instance(l))
return inss
def get_folds(self, filename=None, group_by=None):
"""
Loads folds from file or generates them if the file doesn't exist.
:param filename: name of the folds (JSON) file
:param group_by: property to group by when folds need to be created
"""
if isfile(filename):
self.load_folds(filename)
else:
self.create_folds(group_by)
self.save_folds(filename)
def save_folds(self, filename):
"""Saves folds to (JSON) file."""
with open(filename, "w") as outfile:
json.dump(self.folds, outfile, indent=4)
def load_folds(self, filename):
"""Loads previously created folds from (JSON) file."""
json_data = open(filename)
self.folds = json.load(json_data)
if len(self.folds) != self.__k:
raise Exception("Error in splits file: number of folds mismatches!")
def run(self):
"""Runs cross-validation."""
# if folds haven't been initialized/created before (w/ get_folds or create_folds)
# then they'll be created using the default grouping (i.e., based on instance_id)
if self.folds is None:
self.create_folds()
# this holds the estimated target values (and also the confidence score, if available)
test_inss = Instances()
for i, fold in enumerate(self.folds):
print("=======================================")
print("Cross validation for fold " + str(i) + " ...")
model = self.callback_train(self.get_instances(fold, "training"))
fold_test_inss = self.callback_test(self.get_instances(fold, "testing"), model)
test_inss.append_instances(fold_test_inss.get_all())
return test_inss
def main():
# Example (sketch; `instances`, `train_func` and `test_func` are application-specific):
# cv = CrossValidation(10, instances, train_func, test_func)
# cv.create_folds()
# cv.save_folds("data/cv/splits.json")
# cv.run()
pass
if __name__ == "__main__":
main()
|
medtray/MultiEm-RGCN | nordlys/nordlys/__init__.py | <filename>nordlys/nordlys/__init__.py
"""
Nordlys toolkit is provided as the ``nordlys`` Python package.
Nordlys is delivered with an extensive and detailed documentation, from package-level overviews, to module usage \
examples, to fully documented classes and methods.
""" |
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/__init__.py | <reponame>medtray/MultiEm-RGCN
"""
Retrieval
=========
The retrieval package provides basic indexing and scoring functionality based on Elasticsearch (v2.3).
It can be used both for documents and for entities (as the latter are represented as fielded documents).
Indexing
--------
Indexing can be done by directly reading the content of documents.
The :mod:`~nordlys.core.retrieval.toy_indexer` module provides a toy example.
When the content of documents is stored in MongoDB (e.g., for DBpedia entities), use the :mod:`~nordlys.core.retrieval.indexer_mongo` module for indexing.
For further details on how this module can be used, see :mod:`~nordlys.core.data.dbpedia.indexer_dbpedia`.
For indexing DBpedia entities, we read the content of entities from MongoDB, where they are stored.
.. todo:: Explain indexing (representing entities as fielded documents, mongo to elasticsearch)
Notes
~~~~~
- To speed up indexing, use :meth:`~nordlys.core.retrieval.elastic.Elastic.add_docs_bulk`. The optimal number of documents to send in a single bulk depends on the size of documents; you need to figure it out experimentally.
- We strongly recommend using the default Elasticsearch similarity (currently BM25) for indexing. (`Other similarity functions <https://www.elastic.co/guide/en/elasticsearch/reference/2.3/index-modules-similarity.html>`_ may also be used; in that case the similarity function can be updated after indexing.)
- Our default setting is *not* to store term positions in the index (for efficiency considerations).
Retrieval
---------
Retrieval is done in two stages:
- *First pass*: The top ``N`` documents are retrieved using Elastic's default search method
- *Second pass*: The (expensive) scoring of the top ``N`` documents is performed (implemented in the Nordlys)
Nordlys currently supports the following models for second pass retrieval:
- Language modelling (LM) [1]
- Mixture of Language Models (MLM) [2]
- Probabilistic Model for Semistructured Data (PRMS) [3]
Check out :mod:`~nordlys.core.retrieval.scorer` module to get inspiration for implementing a new retrieval model.
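A minimal sketch of programmatic usage (the index name, query, and parameter values are illustrative)::

    from nordlys.core.retrieval.retrieval import Retrieval

    config = {"index_name": "toy_index", "model": "lm",
              "smoothing_method": "jm", "smoothing_param": 0.1}
    r = Retrieval(config)
    results = r.retrieve("hello world")  # first pass + LM re-scoring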
Command line usage
~~~~~~~~~~~~~~~~~~
See :py:mod:`nordlys.core.retrieval.retrieval`
Notes
~~~~~
- Always use a :class:`~nordlys.core.retrieval.elastic_cache.ElasticCache` object (instead of :class:`~nordlys.core.retrieval.elastic.Elastic`) for getting stats from the index. This class stores index stats in the memory, which highly benefits efficiency.
- We recommend creating a new :class:`~nordlys.core.retrieval.elastic_cache.ElasticCache` object for each query. This way, you will make efficient use of your machine's memory.
-------------------
[1] <NAME> and <NAME>. 1998. *A Language modeling approach to information retrieval*. In Proc. of SIGIR '98.
[2] <NAME> and <NAME>. 2003. *Combining document representations for known-item search*. Proc. of SIGIR '03.
[3] <NAME>, <NAME>, and <NAME>. 2009. *A probabilistic retrieval model for semistructured data*. In Proc. of ECIR '09.
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/storage/__init__.py | <filename>nordlys/nordlys/core/storage/__init__.py
"""This is the (**intro** of the) storage package doc. It's in the ``__init__.py`` of the package, as an example for the other packages, and IT SHOULD BE cleaned up without all of this ;).
No idea what text it could contain :)
It might contain, e.g., items in this way
* This
* That
and also code snippets like usual, using as usual these double colons and the indented code::
mongo = Mongo(host, db, collection)
And that's it.
(In the following I included all the up-to-now available modules, sectioned by no criteria in particular.)
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/index_wiki_data.py | <filename>nordlys/nordlys/core/retrieval/index_wiki_data.py
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib
import os
import json
from collections import Counter
import pandas as pd
from utils import *
data_folder='tables_redi2_1'
data_csv=pd.read_csv('features2.csv')
#attributes = list(data_csv)
test_data=data_csv['table_id']
query=data_csv['query']
relevance=data_csv['rel'] |
medtray/MultiEm-RGCN | nordlys/nordlys/logic/el/greedy.py | <reponame>medtray/MultiEm-RGCN<gh_stars>10-100
"""
Generative model for interpretation set finding
@author: <NAME>
"""
from __future__ import division
from nordlys.core.ml.instances import Instances
class Greedy(object):
def __init__(self, score_th):
self.__score_th = score_th
def disambiguate(self, inss):
"""
Takes instances and generates set of entity linking interpretations.
:param inss: Instances object
:return: sets of interpretations [{mention: (en_id, score), ..}, ...]
"""
pruned_inss = self.prune_by_score(inss)
pruned_inss = self.prune_containment_mentions(pruned_inss)
interpretations = self.create_interpretations(pruned_inss)
return interpretations
def prune_by_score(self, query_inss):
""" prunes based on a static threshold of ranking score."""
valid_inss = []
for ins in query_inss.get_all():
if ins.score >= self.__score_th:
valid_inss.append(ins)
return Instances(valid_inss)
def prune_containment_mentions(self, query_inss):
"""Deletes containment mentions, if they have lower score."""
if len(query_inss.get_all()) == 0:
return query_inss
valid_inss = [] #dict() # {mention: ins}
valid_mens = set()
for ins in sorted(query_inss.get_all(), key=lambda item: item.score, reverse=True):
is_contained = False
cand_men = ins.get_property("mention")
for men in valid_mens:
if (cand_men != men) and ((cand_men in men) or (men in cand_men)):
is_contained = True
if not is_contained:
# valid_inss[ins.get_property("mention")] = ins
valid_inss.append(ins)
valid_mens.add(ins.get_property("mention")) # @todo: This line should be fixed
return Instances(valid_inss) #list(valid_inss.values()))
def create_interpretations(self, query_inss):
"""
Groups CER instances as interpretation sets.
:return list of interpretations, where each interpretation is a dictionary {mention: (en_id, score), ..}
"""
interpretations = [dict()] # list of dictionaries {men: ins}
for ins in sorted(query_inss.get_all(), key=lambda item:item.score, reverse=True):
added = False
for inter in interpretations:
mentions = list(inter.keys())
mentions.append(ins.get_property("mention"))
if not self.is_overlapping(mentions):
inter[ins.get_property("mention")] = (ins.get_property("en_id"), ins.score)
added = True
if not added:
interpretations.append({ins.get_property("mention"): (ins.get_property("en_id"), ins.score)})
return interpretations
def is_overlapping(self, mentions):
"""
Checks whether the strings of a set overlapping or not.
i.e. if there exists a term that appears twice in the whole set.
E.g. {"the", "music man"} is not overlapping
{"the", "the man", "music"} is overlapping.
NOTE: If a query is "yxxz" the mentions {"yx", "xz"} and {"yx", "x"} are overlapping.
:param mentions: A list of strings
:return True/False
"""
word_list = []
for mention in mentions:
word_list += set(mention.split())
if len(word_list) == len(set(word_list)):
return False
else:
return True |
medtray/MultiEm-RGCN | nordlys/nordlys/logic/__init__.py | <gh_stars>10-100
"""
Services
========
All modules in this package serving for services layer.
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/retrieval.py | """
Retrieval
=========
Console application for general-purpose retrieval.
Usage
-----
::
python -m nordlys.core.retrieval.retrieval <config_file>
Results are written (in TREC format) to the output file specified in the config.
Config parameters
------------------
- **index_name**: name of the index,
- **first_pass**:
- **1st_num_docs**: number of documents in first-pass scoring (default: 1000)
- **field**: field used in first pass retrieval (default: Elastic.FIELD_CATCHALL)
- **fields_return**: comma-separated list of fields to return for each hit (default: "")
- **num_docs**: number of documents to return (default: 100)
- **start**: starting offset for ranked documents (default:0)
- **model**: name of retrieval model; accepted values: [lm, mlm, prms] (default: lm)
- **fields**: single field name for LM (default: catchall);
list of fields for PRMS (default: [catchall]);
dictionary of fields and corresponding weights for MLM (default: {catchall: 1})
- **smoothing_method**: accepted values: [jm, dirichlet] (default: dirichlet)
- **smoothing_param**: value of lambda or mu; accepted values: [float or "avg_len"], (jm default: 0.1, dirichlet default: 2000)
- **query_file**: name of query file (JSON),
- **output_file**: name of output file,
- **run_id**: run id for TREC output
Example config
---------------
.. code:: python
{"index_name": "dbpedia_2015_10",
"first_pass": {
"1st_num_docs": 1000
},
"model": "prms",
"num_docs": 1000,
"smoothing_method": "dirichlet",
"smoothing_param": 2000,
"fields": ["names", "categories", "attributes", "similar_entity_names", "related_entity_names"],
"query_file": "path/to/queries.json",
"output_file": "path/to/output.txt",
"run_id": "test"
}
------------------------
:Authors: <NAME>, <NAME>
"""
import argparse
import json
import sys
import time
from pprint import pprint
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.retrieval.scorer import Scorer, ScorerLM
from nordlys.core.utils.file_utils import FileUtils
from nordlys.config import PLOGGER
class Retrieval(object):
FIELDED_MODELS = {"mlm", "prms"}
LM_MODELS = {"lm", "mlm", "prms"}
def __init__(self, config):
self.check_config(config)
self.__config = config
self.__index_name = config["index_name"]
self.__first_pass_num_docs = int(config["first_pass"]["1st_num_docs"])
self.__first_pass_field = config["first_pass"]["field"]
self.__first_pass_fields_return = config["first_pass"]["fields_return"]
self.__first_pass_model = config["first_pass"]["model"]
self.__start = int(config["start"])
self.__model = config.get("model", None)
self.__num_docs = int(config.get("num_docs", None))
self.__query_file = config.get("query_file", None)
self.__output_file = config.get("output_file", None)
self.__run_id = config.get("run_id", self.__model)
self.__elastic = ElasticCache(self.__index_name)
@staticmethod
def check_config(config):
"""Checks config parameters and sets default values."""
try:
if config.get("index_name", None) is None:
raise Exception("index_name is missing")
# Checks first pass parameters
if config.get("first_pass", None) is None:
config["first_pass"] = {}
if config["first_pass"].get("1st_num_docs", None) is None:
config["first_pass"]["1st_num_docs"] = 1000
if config["first_pass"].get("field", None) is None:
config["first_pass"]["field"] = Elastic.FIELD_CATCHALL
if config["first_pass"].get("fields_return", None) is None:
config["first_pass"]["fields_return"] = ""
if config["first_pass"].get("model", None) is None:
config["first_pass"]["model"] = Elastic.BM25
if config.get("start", None) is None:
config["start"] = 0
if config.get("num_docs", None) is None:
config["num_docs"] = 100
if config.get("model", None) in Retrieval.LM_MODELS:
if config.get("smoothing_method", None) is None:
config["smoothing_method"] = ScorerLM.DIRICHLET
if config.get("smoothing_param", None) is None:
if config["smoothing_method"] == ScorerLM.DIRICHLET:
config["smoothing_param"] = 2000
elif config["smoothing_method"] == ScorerLM.JM:
config["smoothing_param"] = 0.1
else:
raise Exception("Smoothing method is not supported.")
if config.get("model", None) == "lm":
if config.get("fields", None) is None:
config["fields"] = Elastic.FIELD_CATCHALL
if config.get("model", None) == "mlm":
if config.get("fields", None) is None:
config["fields"] = {"similar_entity_names": 0.2, "catchall": 0.8}
if config.get("model", None) == "prms":
if config.get("fields", None) is None:
config["fields"] = [Elastic.FIELD_CATCHALL]
except Exception as e:
PLOGGER.error("Error in config file: ", e)
sys.exit(1)
def __get_fields(self):
"""Returns the name of all fields that will be used in the retrieval model."""
fields = []
if type(self.__config["fields"]) == str:
fields.append(self.__config["fields"])
elif type(self.__config["fields"]) == dict:
fields = self.__config["fields"].keys()
else:
fields = self.__config["fields"]
return fields
def _first_pass_scoring(self, analyzed_query):
"""Returns first-pass scoring of documents.
:param analyzed_query: analyzed query
:return: RetrievalResults object
"""
PLOGGER.debug("\tFirst pass scoring... ", )
res1 = self.__elastic.search(analyzed_query, self.__first_pass_field, num=self.__first_pass_num_docs,
fields_return=self.__first_pass_fields_return)
return res1
def _second_pass_scoring(self, res1, scorer):
"""Returns second-pass scoring of documents.
:param res1: first pass results
:param scorer: scorer object
:return: RetrievalResults object
"""
PLOGGER.debug("\tSecond pass scoring... ", )
for field in self.__get_fields():
self.__elastic.multi_termvector(list(res1.keys()), field)
res2 = {}
for doc_id in res1.keys():
res2[doc_id] = {"score": scorer.score_doc(doc_id), "fields": res1[doc_id].get("fields", {})}
PLOGGER.debug("done")
return res2
def retrieve(self, query, scorer=None):
"""Scores documents for the given query."""
query = self.__elastic.analyze_query(query)
# 1st pass retrieval
res1 = self._first_pass_scoring(query)
if self.__model == "bm25":
return res1
# 2nd pass retrieval
scorer = scorer if scorer else Scorer.get_scorer(self.__elastic, query, self.__config)
res2 = self._second_pass_scoring(res1, scorer)
return res2
def batch_retrieval(self):
"""Scores queries in a batch and outputs results."""
queries = json.load(open(self.__query_file))
# init output file (opening in "w" mode truncates any previous content)
out = open(self.__output_file, "w")
# retrieves documents
for query_id in sorted(queries):
PLOGGER.info("scoring [" + query_id + "] " + queries[query_id])
results = self.retrieve(queries[query_id])
out.write(self.trec_format(results, query_id, self.__num_docs))
out.close()
PLOGGER.info("Output file:" + self.__output_file)
def trec_format(self, results, query_id, max_rank=100):
"""Outputs results in TREC format"""
out_str = ""
rank = 1
for doc_id, score in sorted(results.items(), key=lambda x: x[1]["score"], reverse=True):
if rank > max_rank:
break
out_str += query_id + "\tQ0\t" + doc_id + "\t" + str(rank) + "\t" + str(score["score"]) + "\t" + self.__run_id + "\n"
rank += 1
return out_str
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="config file", type=str)
args = parser.parse_args()
return args
def get_config():
example_config = {"index_name": "toy_index",
"query_file": "data/dbpedia-entity-v1/queries/test_queries2.json",
"first_pass": {
"num_docs": 10,
"field": "content",
# "model": "LMJelinekMercer",
# "model_params": {"lambda": 0.1}
},
"fields": "content",
"model": "lm",
"smoothing_method": "jm",
"smoothing_param": 0.1,
"output_file": "output/test_retrieval.txt"
}
return example_config
def main(args):
s_t = time.time() # start time
config = FileUtils.load_config(args.config) if args.config != "" else get_config()
r = Retrieval(config)
r.batch_retrieval()
e_t = time.time() # end time
print("Execution time(min):\t" + str((e_t - s_t) / 60) + "\n")
if __name__ == "__main__":
main(arg_parser())
|
medtray/MultiEm-RGCN | save_graph_info3.py | <reponame>medtray/MultiEm-RGCN
import numpy as np
import json
import os
from prepare_wikiTables_rdfs4 import PrepareGraph
import pandas as pd
def read_file_for_nfcg(file,delimiter):
text_file = open(file, "r")
lines = text_file.readlines()
list_lines = []
for line in lines:
# print(line)
line = line[0:len(line) - 1]
aa = line.split(delimiter)
list_lines.append(aa)
inter = np.array(list_lines)
return inter
def read_queries(file,delimiter):
text_file = open(file, "r")
lines = text_file.readlines()
list_lines = []
for line in lines:
# print(line)
line = line[0:len(line) - 1]
aa = line.split(delimiter)
query=[aa[0],' '.join(aa[1:])]
list_lines.append(query)
inter = np.array(list_lines)
return inter
base='./wikiTables'
tables_id_file = os.path.join(base, 'tables_id.npy')
data=read_file_for_nfcg(os.path.join(base,'qrels.txt'),'\t')
queries=read_queries(os.path.join(base,'queries_wiki'),' ')
path = os.path.join(base,'data_fields_with_values.json')
with open(path) as f:
dt = json.load(f)
index_to_use=[]
for i,row in enumerate(data):
if row[2] in dt:
index_to_use.append(i)
data=data[index_to_use]
GraphCons=PrepareGraph(queries,dt)
GraphCons.add_links_docs(data,tables_id_file)
entities_file=os.path.join(base,'entities.dict')
relations_file=os.path.join(base,'relations.dict')
train_file=os.path.join(base,'train.txt')
test_file=os.path.join(base,'test.txt')
valid_file=os.path.join(base,'valid.txt')
sep='\t'
with open(relations_file,'w') as rf:
relations_mapping=''
for i,relation in enumerate(GraphCons.relations):
relations_mapping+=str(i)+sep+relation+'\n'
relations_mapping=relations_mapping[:-1]
rf.write(relations_mapping)
with open(entities_file,'w') as ef:
entities_mapping=''
for (k,v) in GraphCons.entities.items():
entities_mapping+=str(v)+sep+k+'\n'
entities_mapping = entities_mapping[:-1]
ef.write(entities_mapping)
with open(train_file, 'w') as tf:
    tf.write(GraphCons.triples)
with open(test_file, 'w') as tef:
    tef.write(GraphCons.test_triples)
# the validation split reuses the test triples
with open(valid_file, 'w') as vf:
    vf.write(GraphCons.test_triples)
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/indexer_mongo.py | """
Mongo Indexer
=============
This class is a tool for creating an index from a Mongo collection.
To use this class, you need to implement :func:`callback_get_doc_content` function.
See :mod:`~nordlys.core.data.dbpedia.indexer_fsdm` for an example usage of this class.
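A minimal callback sketch (the field names and index name are illustrative)::

    def get_doc_content(doc):
        # index only the label field of each Mongo document
        return {"content": " ".join(doc.get("<rdfs:label>", []))}

    indexer = IndexerMongo("my_index", {"content": Elastic.analyzed_field()}, "my_collection")
    indexer.build(get_doc_content)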
:Author: <NAME>
"""
from nordlys.config import MONGO_COLLECTION_DBPEDIA, MONGO_HOST, MONGO_DB, PLOGGER
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.storage.mongo import Mongo
# from nordlys.core.utils.logging_utils import PLOGGER
class IndexerMongo(object):
def __init__(self, index_name, mappings, collection, model=Elastic.BM25):
self.__index_name = index_name
self.__mappings = mappings
self.__mongo = Mongo(MONGO_HOST, MONGO_DB, collection)
self.__model = model
def build(self, callback_get_doc_content, bulk_size=1000):
"""Builds the DBpedia index from the mongo collection.
To speed up indexing, documents are added to the index in bulks.
There is an optimal bulk size; determine it experimentally.
:param callback_get_doc_content: a function that gets a document from Mongo and returns its content for indexing
:param bulk_size: Number of documents to be added to the index as a bulk
"""
PLOGGER.info("Building " + self.__index_name + " ...")
elastic = Elastic(self.__index_name)
elastic.create_index(self.__mappings, model=self.__model, force=True)
i = 0
docs = dict()
for mdoc in self.__mongo.find_all(no_timeout=True):
docid = Mongo.unescape(mdoc[Mongo.ID_FIELD])
# get back document from mongo with keys and _id field unescaped
doc = callback_get_doc_content(Mongo.unescape_doc(mdoc))
if doc is None:
continue
docs[docid] = doc
i += 1
if i % bulk_size == 0:
elastic.add_docs_bulk(docs)
docs = dict()
PLOGGER.info(str(i // 1000) + "K documents indexed")
# indexing the last bulk of documents
elastic.add_docs_bulk(docs)
PLOGGER.info("Finished indexing (" + str(i) + " documents in total)") |
medtray/MultiEm-RGCN | nordlys/nordlys/core/utils/entity_utils.py | <filename>nordlys/nordlys/core/utils/entity_utils.py
"""
Entity Utils
============
Utility methods for working with entities.
:Author: <NAME>
"""
from urllib.parse import unquote
from nordlys.core.storage.mongo import Mongo
class EntityUtils(object):
PREDICATE_NAME = "<rdfs:label>"
PREDICATE_REDIRECT = "<dbo:wikiPageRedirects>"
PREDICATE_DISAMBIGUATE = "<dbo:wikiPageDisambiguates>"
PREDICATE_ABSTRACT = "<dbo:abstract>"
def __init__(self, entity):
self.__entity = entity
def get_id(self):
"""Returns the (internal, i.e., prefixed) URI of the entity."""
return self.__entity[Mongo.ID_FIELD]
def get_name(self):
"""Returns the name of a entity (or None)."""
name = self.__entity.get(self.PREDICATE_NAME, None)
return name[0] if name else None
def has_name(self):
"""Checks whether the entity has a name."""
return self.PREDICATE_NAME in self.__entity
def has_abstract(self):
"""Checks whether the entity has abstract."""
return self.PREDICATE_ABSTRACT in self.__entity
def is_redirect(self):
"""Checks whether the entity is a redirect."""
return self.PREDICATE_REDIRECT in self.__entity
def is_disambiguation(self):
"""Checks whether the entity is a disambiguation page."""
specified_in_url = self.get_id().endswith("_(disambiguation)>")
has_disambiguate_predicate = self.PREDICATE_DISAMBIGUATE in self.__entity
return specified_in_url or has_disambiguate_predicate
def is_entity(self):
"""Checks whether the entity is a real entity.
It means that it:
- has a name and abstract
- is not a disambiguation or redirect page
"""
is_en = self.has_name() and self.has_abstract() and not(self.is_redirect()) and not(self.is_disambiguation())
return is_en
def has_predicate(self, predicate):
"""Checks whether the entity has the predicate."""
return predicate in self.__entity
def get_predicate(self, predicate):
"""Returns the values of a predicate (or None)."""
return self.__entity.get(predicate, None)
@staticmethod
def convert_39_to_201510(en_id):
"""Converts DBpedia 3.9 entity IDs to 2015-10
The encoding is based on http://wiki.dbpedia.org/uri-encoding
:param en_id: utf-8 decoded string, e.g. <dbpedia:Karen_Sp%C3%A4rck_Jones>, <dbpedia:O_Brother,_Where_Art_Thou?>
:return: <dbpedia:Karen_Spärck_Jones>, <dbpedia:O_Brother,_Where_Art_Thou%3F>
"""
special_chars = ['\"', '%', '?', '\\', '^', '`']
encoded_chars = ['%22', '%25', '%3F', '%5C', '%5E', '%60']
en_id = unquote(en_id)
for i in range(0, len(special_chars)):
en_id = en_id.replace(special_chars[i], encoded_chars[i])
return en_id
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/feature_cache.py | <gh_stars>10-100
"""
Feature Cache
=============
Implements a generic feature cache.
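Example usage::

    cache = FeatureCache()
    # computes len("world cup") once, then serves it from the cache
    val = cache.get_feature_val("mention_len", "world cup", len, "world cup")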
Authors: <NAME>
"""
from collections import defaultdict
class FeatureCache(object):
def __init__(self):
self.cache = defaultdict(dict)
def set_feature_val(self, feature_name, key, value):
"""Adds a feature and its value to the cache.
:param feature_name: Name of the feature
:param key: the name of what feature is computed for (e.g., a mention, entity)
:param value: feature value
"""
self.cache[feature_name][key] = value
def get_feature_val(self, feature_name, key, callback_func, *args):
"""Checks the cache and computes the feature if it does not exists"""
if key not in self.cache.get(feature_name, {}):
self.cache[feature_name][key] = callback_func(*args)
return self.cache[feature_name][key] |
medtray/MultiEm-RGCN | nordlys/nordlys/services/__init__.py | """
Services
========
All modules in this package can be used from the command line or from a RESTful API.
""" |
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/elastic.py | """
Elastic
=======
Utility class for working with Elasticsearch.
This class is to be instantiated for each index.
Indexing usage
--------------
To create an index, first you need to define field mappings and then build the index.
The sample code for creating an index is provided at :py:mod:`nordlys.core.retrieval.toy_indexer`.
Retrieval usage
---------------
The following statistics can be obtained from this class:
- Number of documents: :func:`Elastic.num_docs`
- Number of fields: :func:`Elastic.num_fields`
- Document count: :func:`Elastic.doc_count`
- Collection length: :func:`Elastic.coll_length`
- Average length: :func:`Elastic.avg_len`
- Document length: :func:`Elastic.doc_length`
- Document frequency: :func:`Elastic.doc_freq`
- Collection frequency: :func:`Elastic.coll_term_freq`
- Term frequencies: :func:`Elastic.term_freqs`
Efficiency considerations
~~~~~~~~~~~~~~~~~~~~~~~~~
- For efficiency reasons, we do not store term positions during indexing. To store them, see the corresponding mapping functions :func:`Elastic.analyzed_field`, :func:`Elastic.notanalyzed_searchable_field`.
- Use :py:mod:`ElasticCache <nordlys.core.retrieval.elastic_cache>` for getting index statistics. This module caches the statistics in memory, which boosts efficiency.
- Mind that :py:mod:`ElasticCache <nordlys.core.retrieval.elastic_cache>` does not empty the cache!
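For example (assuming :class:`ElasticCache` exposes the same statistics API, as the notes above state)::

    from nordlys.core.retrieval.elastic_cache import ElasticCache

    es = ElasticCache("toy_index")
    df = es.doc_freq("world", "content")  # cached after the first call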
:Authors: <NAME>, <NAME>
"""
from pprint import pprint, pformat
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from nordlys.config import ELASTIC_HOSTS, ELASTIC_SETTINGS
class Elastic(object):
FIELD_CATCHALL = "catchall"
FIELD_ELASTIC_CATCHALL = "_all"
DOC_TYPE = "doc" # we don't make use of types
ANALYZER_STOP_STEM = "english"
ANALYZER_STOP = "stop_en"
BM25 = "BM25"
SIMILARITY = "sim" # Used when other similarities are used
def __init__(self, index_name):
self.__es = Elasticsearch(hosts=ELASTIC_HOSTS)
self.__index_name = index_name
@staticmethod
def analyzed_field(analyzer=ANALYZER_STOP):
"""Returns the mapping for analyzed fields.
For efficiency considerations, term positions are not stored. To store term positions, change
``"term_vector": "with_positions_offsets"``
:param analyzer: name of the analyzer; valid options: [ANALYZER_STOP, ANALYZER_STOP_STEM]
"""
if analyzer not in {Elastic.ANALYZER_STOP, Elastic.ANALYZER_STOP_STEM}:
# PLOGGER.error("Error: Analyzer", analyzer, "is not valid.")
exit(1)  # an invalid analyzer is a configuration error
return {"type": "text",
"term_vector": "yes",
"analyzer": analyzer}
@staticmethod
def notanalyzed_field():
"""Returns the mapping for not-analyzed fields."""
return {"type": "text",
"index": "not_analyzed"}
@staticmethod
def notanalyzed_searchable_field():
"""Returns the mapping for not-analyzed fields."""
return {"type": "text",
"term_vector": "yes",
"analyzer": "keyword"}
def __gen_similarity(self, model, params=None):
"""Gets the custom similarity function."""
similarity = params if params else {}
similarity["type"] = model
return {Elastic.SIMILARITY: similarity}
def __gen_analyzers(self):
"""Gets custom analyzers.
We include customized analyzers in the index setting, a field may or may not use it.
"""
analyzer = {"type": "standard", "stopwords": "_english_"}
analyzers = {"analyzer": {Elastic.ANALYZER_STOP: analyzer}}
return analyzers
def analyze_query(self, query, analyzer=ANALYZER_STOP):
"""Analyzes the query.
:param query: raw query
:param analyzer: name of analyzer
"""
if query.strip() == "":
return ""
body = {"analyzer": analyzer, "text": query}
tokens = self.__es.indices.analyze(index=self.__index_name, body=body)["tokens"]
query_terms = []
for t in sorted(tokens, key=lambda x: x["position"]):
query_terms.append(t["token"])
return " ".join(query_terms)
def get_mapping(self):
"""Returns mapping definition for the index."""
mapping = self.__es.indices.get_mapping(index=self.__index_name, doc_type=self.DOC_TYPE)
return mapping[self.__index_name]["mappings"][self.DOC_TYPE]["properties"]
def get_settings(self):
"""Returns index settings."""
return self.__es.indices.get_settings(index=self.__index_name)[self.__index_name]["settings"]["index"]
def __update_settings(self, settings):
"""Updates the index settings."""
self.__es.indices.close(index=self.__index_name)
self.__es.indices.put_settings(index=self.__index_name, body=settings)
self.__es.indices.open(index=self.__index_name)
self.__es.indices.refresh(index=self.__index_name)
def update_similarity(self, model=BM25, params=None):
"""Updates the similarity function "sim", which is fixed for all index fields.
The method and param should match elastic settings:
https://www.elastic.co/guide/en/elasticsearch/reference/2.3/index-modules-similarity.html
:param model: name of the elastic model
:param params: dictionary of params based on elastic
"""
old_similarity = self.get_settings()["similarity"]
new_similarity = self.__gen_similarity(model, params)
# We only update the similarity if it is different from the old one.
# this avoids unnecessary closing of the index
if old_similarity != new_similarity:
self.__update_settings({"similarity": new_similarity})
def delete_index(self):
"""Deletes an index."""
self.__es.indices.delete(index=self.__index_name)
print("Index <" + self.__index_name + "> has been deleted.")
def create_index(self, mappings, model=BM25, model_params=None, force=False):
"""Creates index (if it doesn't exist).
:param mappings: field mappings
:param model: name of elastic search similarity
:param model_params: name of elastic search similarity
:param force: forces index creation (overwrites if already exists)
"""
if self.__es.indices.exists(self.__index_name):
if force:
self.delete_index()
else:
# PLOGGER.info("Index already exists. No changes were made.")
return
# sets general elastic settings
body = ELASTIC_SETTINGS
#body={}
# sets the global index settings
# number of shards should be always set to 1; otherwise the stats would not be correct
body["settings"] = {"analysis": self.__gen_analyzers(),
"index": {"number_of_shards": 1,
"number_of_replicas": 0},
}
# sets similarity function
# If model is not BM25, a similarity module with the given model and params is defined
if model != Elastic.BM25:
body["settings"]["similarity"] = self.__gen_similarity(model, model_params)
sim = model if model == Elastic.BM25 else Elastic.SIMILARITY
for mapping in mappings.values():
mapping["similarity"] = sim
# sets the field mappings
body["mappings"] = {self.DOC_TYPE: {"properties": mappings}}
# body["settings"]["similarity"]= {
# "scripted_tfidf": {
# "type": "scripted",
# "script": {
# "source": "double tf = Math.sqrt(doc.freq); double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; double norm = 1/Math.sqrt(doc.length); return query.boost * tf * idf * norm;"
# }
# }
# }
# creates the index
self.__es.indices.create(index=self.__index_name, body=body)
# PLOGGER.info(pformat(body))
# PLOGGER.info("New index <" + self.__index_name + "> is created.")
def add_docs_bulk(self, docs):
"""Adds a set of documents to the index in a bulk.
:param docs: dictionary {doc_id: doc}
"""
actions = []
for doc_id, doc in docs.items():
action = {
"_index": self.__index_name,
"_type": self.DOC_TYPE,
"_id": doc_id,
"_source": doc
}
actions.append(action)
if len(actions) > 0:
helpers.bulk(self.__es, actions)
def add_doc(self, doc_id, contents):
"""Adds a document with the specified contents to the index.
:param doc_id: document ID
:param contents: content of document
"""
self.__es.index(index=self.__index_name, doc_type=self.DOC_TYPE, id=doc_id, body=contents)
def get_doc(self, doc_id, fields=None, source=True):
"""Gets a document from the index based on its ID.
:param doc_id: document ID
:param fields: list of fields to return (default: all)
:param source: return document source as well (default: yes)
"""
return self.__es.get(index=self.__index_name, doc_type=self.DOC_TYPE, id=doc_id, fields=fields, _source=source)
def search(self, query, field, num=100, fields_return="", start=0):
"""Searches in a given field using the similarity method configured in the index for that field.
:param query: query string
:param field: field to search in
:param num: number of hits to return (default: 100)
:param fields_return: additional document fields to be returned
:param start: starting offset (default: 0)
:return: dictionary of document IDs with scores
"""
hits = self.__es.search(index=self.__index_name, q=query, df=field, _source=False, size=num,
fielddata_fields=fields_return, from_=start)["hits"]["hits"]
results = {}
for hit in hits:
results[hit["_id"]] = {"score": hit["_score"], "fields": hit.get("fields", {})}
return results
def search_complex(self, body, num=10, fields_return="", start=0):
"""
Supports complex structured queries, which are sent as a ``body`` field in Elastic search.
For detailed information on formulating structured queries, see the
`official instructions. <https://www.elastic.co/guide/en/elasticsearch/guide/current/search-in-depth.html>`_
Below is an example to search in two particular fields that each must contain a specific term.
:Example:
.. code-block:: python
# [explanation of the query]
term_1 = "hello"
term_2 = "world"
body = {
"query": {
"bool": {
"must": [
{
"match": {"title": term_1}
},
{
"match_phrase": {"content": term_2}
}
]
}
}
}
:param body: query body
:param field: field to search in
:param num: number of hits to return (default: 100)
:param fields_return: additional document fields to be returned
:param start: starting offset (default: 0)
:return: dictionary of document IDs with scores
"""
hits = self.__es.search(index=self.__index_name, body=body, _source=False, size=num,
fielddata_fields=fields_return, from_=start)["hits"]["hits"]
results = {}
for hit in hits:
results[hit["_id"]] = {"score": hit["_score"], "fields": hit.get("fields", {})}
return results
def get_field_stats(self, field):
"""Returns stats of the given field."""
return self.__es.field_stats(index=self.__index_name, fields=[field])["indices"]["_all"]["fields"][field]
def get_fields(self):
"""Returns name of fields in the index."""
return list(self.get_mapping().keys())
# =========================================
# ================= Stats =================
# =========================================
def _get_termvector(self, doc_id, field, term_stats=False):
"""Returns a term vector for a given document field, including global field and term statistics.
Term stats can have a serious performance impact; should be set to true only if it is needed!
:param doc_id: document ID
:param field: field name
:param term_stats: if True, returns term statistics
:return: Term vector dictionary
"""
tv = self.__es.termvectors(index=self.__index_name, doc_type=self.DOC_TYPE, id=doc_id, fields=field,
term_statistics=term_stats)
return tv.get("term_vectors", {}).get(field, {}).get("terms", {})
def _get_multi_termvectors(self, doc_ids, field, term_stats=False):
"""Returns multiple term vectors for a given document field (similar to a single term vector)
:param doc_ids: document ID
:param field: field name
:param term_stats: if True, returns term statistics
:return: {'doc_id': {tv}, ..}
"""
tv_all = self.__es.mtermvectors(index=self.__index_name, doc_type=self.DOC_TYPE, ids=",".join(doc_ids),
fields=field, term_statistics=term_stats)
result = {}
for tv in tv_all["docs"]:
result[tv["_id"]] = tv.get("term_vectors", {}).get(field, {}).get("terms", {})
return result
def _get_coll_termvector(self, term, field):
"""Returns a term vector containing collection stats of a term."""
body = {"query": {"bool": {"must": {"term": {field: term}}}}}
hits = self.search_complex(body, num=1)
# hits = self.search(term, field, num=1)
doc_id = next(iter(hits.keys())) if len(hits) > 0 else None
return self._get_termvector(doc_id, field, term_stats=True) if doc_id else {}
def num_docs(self):
"""Returns the number of documents in the index."""
return self.__es.count(index=self.__index_name, doc_type=self.DOC_TYPE)["count"]
def num_fields(self):
"""Returns number of fields in the index."""
return len(self.get_mapping())
def doc_count(self, field):
"""Returns number of documents with at least one term for the given field."""
return self.get_field_stats(field)["doc_count"]
def coll_length(self, field):
"""Returns length of field in the collection."""
return self.get_field_stats(field)["sum_total_term_freq"]
def avg_len(self, field):
"""Returns average length of a field in the collection."""
return self.coll_length(field) / self.doc_count(field)
def doc_length(self, doc_id, field):
"""Returns length of a field in a document."""
return sum(self.term_freqs(doc_id, field).values())
def doc_freq(self, term, field, tv=None):
"""Returns document frequency for the given term and field."""
coll_tv = tv if tv else self._get_coll_termvector(term, field)
return coll_tv.get(term, {}).get("doc_freq", 0)
def coll_term_freq(self, term, field, tv=None):
""" Returns collection term frequency for the given field."""
coll_tv = tv if tv else self._get_coll_termvector(term, field)
return coll_tv.get(term, {}).get("ttf", 0)
def term_freqs(self, doc_id, field, tv=None):
"""Returns term frequencies of all terms for a given document and field."""
doc_tv = tv if tv else self._get_termvector(doc_id, field)
term_freqs = {}
for term, val in doc_tv.items():
term_freqs[term] = val["term_freq"]
return term_freqs
def term_freq(self, doc_id, field, term):
"""Returns frequency of a term in a given document and field."""
return self.term_freqs(doc_id, field).get(term, 0)
if __name__ == "__main__":
# example usage of index statistics
doc_id = 1
field = "content"
term = "you lately"
es = Elastic("toy_index")
pprint(es._get_termvector(doc_id, field=field, term_stats=True))
result=es.search(term, field)
pprint(result)
print("================= Stats =================")
print("[FIELD]: %s [TERM]: %s" % (field, term))
print("- Number of documents: %d" % es.num_docs())
print("- Number of fields: %d" % es.num_fields())
print("- Document count: %d" % es.doc_count(field))
print("- Collection length: %d" % es.coll_length(field))
print("- Average length: %.2f" % es.avg_len(field))
print("- Document length: %d" % es.doc_length(doc_id, field))
print("- Number of fields: %d" % es.num_fields())
print("- Document frequency: %d" % es.doc_freq(term, field))
print("- Collection frequency: %d" % es.coll_term_freq(term, field))
print("- Term frequencies:")
pprint(es.term_freqs(doc_id, field))
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/elr/__init__.py | """
ELR
===
This package is the implementation of ELR-based models.
"""
|
medtray/MultiEm-RGCN | LTR/joint_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class ARCI(nn.Module):
def __init__(self, args):
""""Constructor of the class."""
super(ARCI, self).__init__()
self.wv = args.wv
self.index_to_word = args.index_to_word
self.input_dim = args.emsize
self.device = args.device
#self.emb_drop = nn.Dropout(p=args.dropout_emb)
num_conv1d_layers = len(args.filters_1d)
assert num_conv1d_layers == len(args.kernel_size_1d)
assert num_conv1d_layers == len(args.maxpool_size_1d)
query_feats = args.max_query_len
query_conv1d_layers = []
doc_conv1d_layers = []
for i in range(num_conv1d_layers):
inpsize = args.emsize if i == 0 else args.filters_1d[i - 1]
pad = args.kernel_size_1d[i] // 2
layer = nn.Sequential(
nn.Conv1d(inpsize, args.filters_1d[i], args.kernel_size_1d[i],
padding=pad),
nn.ReLU(inplace=True),
nn.MaxPool1d(args.maxpool_size_1d[i])
)
query_conv1d_layers.append(layer)
query_feats = query_feats // args.maxpool_size_1d[i]
assert query_feats != 0
self.query_conv1d_layers = nn.ModuleList(query_conv1d_layers)
#inpsize = (args.filters_1d[-1] * query_feats) + self.input_dim
inpsize = (args.filters_1d[-1] * query_feats)
# self.mlp = nn.Sequential(
# nn.Linear(self.input_dim, 1),
# #nn.Linear(inpsize // 2, 1)
# )
self.mlpQ=nn.Linear(inpsize,self.input_dim)
def to_embedding(self,input):
shape_input = list(input.shape)
em = input.view(-1)
list_of_embeddings = []
for key in em:
list_of_embeddings += self.wv[self.index_to_word[key]]
list_of_embeddings = torch.Tensor(list_of_embeddings)
embeds = list_of_embeddings.view(shape_input[0], shape_input[1],
self.input_dim).to(self.device)
return embeds
def to_embedding_doc(self,input):
shape_input = list(input.shape)
em = input.view(-1)
list_of_embeddings = []
for key in em:
list_of_embeddings += self.wv[self.index_to_word[key]]
list_of_embeddings = torch.Tensor(list_of_embeddings)
embeds = list_of_embeddings.view(shape_input[0],self.input_dim).to(self.device)
return embeds
def forward(self, batch_queries, batch_docs):
#batch_docs = batch_docs.unsqueeze(0)
batch_queries = batch_queries[0:1]
num_docs = batch_docs.shape[0]
batch_size = 1
embedded_queries = self.to_embedding(batch_queries)
embedded_queries = embedded_queries[0:1]
inp_rep = embedded_queries.transpose(1, 2)
for layer in self.query_conv1d_layers:
inp_rep = layer(inp_rep)
# batch_size x ?
conv_queries = inp_rep.flatten(1)
# batch_size x num_rel_docs x ?
conv_queries = conv_queries.unsqueeze(1).expand(
batch_size, num_docs, conv_queries.size(1))
# batch_size * num_rel_docs x ?
conv_queries = conv_queries.contiguous().view(batch_size * num_docs, -1)
# embedded_queries_mean=torch.mean(embedded_queries,dim=1)
# # batch_size x num_rel_docs x ?
# conv_queries = embedded_queries_mean.unsqueeze(1).expand(
# batch_size, num_docs, embedded_queries_mean.size(1))
# batch_size * num_rel_docs x ?
conv_queries=self.mlpQ(conv_queries)
embedded_docs = self.to_embedding_doc(batch_docs)
#com_rep = torch.cat((conv_queries, embedded_docs), 1)
com_rep= torch.mul(conv_queries,embedded_docs)
return com_rep
class CONVKNRM(nn.Module):
"""Class that classifies question pair as duplicate or not."""
def __init__(self, args):
""""Constructor of the class."""
super(CONVKNRM, self).__init__()
self.wv=args.wv
self.index_to_word=args.index_to_word
self.input_dim=args.emsize
self.device=args.device
self.nbins = args.nbins
#self.dense_f = nn.Linear(self.nbins * 9, 1, 1)
self.tanh = nn.Tanh()
self.conv_uni = nn.Sequential(
nn.Conv2d(1, 128, (1, self.input_dim)),
nn.ReLU()
)
self.conv_bi = nn.Sequential(
nn.Conv2d(1, 128, (2, self.input_dim)),
nn.ReLU()
)
self.conv_tri = nn.Sequential(
nn.Conv2d(1, 128, (3, self.input_dim)),
nn.ReLU()
)
tensor_mu = torch.FloatTensor(args.mu).to(self.device)
tensor_sigma = torch.FloatTensor(args.sigma).to(self.device)
self.mu = Variable(tensor_mu, requires_grad=False).view(1, 1, 1, self.nbins)
self.sigma = Variable(tensor_sigma, requires_grad=False).view(1, 1, 1, self.nbins)
def get_intersect_matrix(self, q_embed, d_embed):
sim = torch.bmm(q_embed, d_embed).view(q_embed.size()[0], q_embed.size()[1], d_embed.size()[2], 1)
pooling_value = torch.exp((- ((sim - self.mu) ** 2) / (self.sigma ** 2) / 2))
pooling_sum = torch.sum(pooling_value, 2)
log_pooling_sum = torch.log(torch.clamp(pooling_sum, min=1e-10)) * 0.01
log_pooling_sum = torch.sum(log_pooling_sum, 1)
return log_pooling_sum
def to_embedding(self,input):
shape_input = list(input.shape)
em = input.view(-1)
list_of_embeddings = []
for key in em:
list_of_embeddings += self.wv[self.index_to_word[key]]
list_of_embeddings = torch.Tensor(list_of_embeddings)
embeds = list_of_embeddings.view(shape_input[0], shape_input[1],
self.input_dim).to(self.device)
return embeds
def forward(self, batch_queries, batch_docs,batch_semantic):
emb_query = self.to_embedding(batch_queries)
emb_desc = self.to_embedding(batch_docs)
qwu_embed = torch.transpose(
torch.squeeze(self.conv_uni(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,
2) + 0.000000001
qwb_embed = torch.transpose(
torch.squeeze(self.conv_bi(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,
2) + 0.000000001
qwt_embed = torch.transpose(
torch.squeeze(self.conv_tri(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,
2) + 0.000000001
dwu_embed = torch.squeeze(
self.conv_uni(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001
dwb_embed = torch.squeeze(
self.conv_bi(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001
dwt_embed = torch.squeeze(
self.conv_tri(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001
qwu_embed_norm = F.normalize(qwu_embed, p=2, dim=2, eps=1e-10)
qwb_embed_norm = F.normalize(qwb_embed, p=2, dim=2, eps=1e-10)
qwt_embed_norm = F.normalize(qwt_embed, p=2, dim=2, eps=1e-10)
dwu_embed_norm = F.normalize(dwu_embed, p=2, dim=1, eps=1e-10)
dwb_embed_norm = F.normalize(dwb_embed, p=2, dim=1, eps=1e-10)
dwt_embed_norm = F.normalize(dwt_embed, p=2, dim=1, eps=1e-10)
log_pooling_sum_wwuu = self.get_intersect_matrix(qwu_embed_norm, dwu_embed_norm)
log_pooling_sum_wwut = self.get_intersect_matrix(qwu_embed_norm, dwt_embed_norm)
log_pooling_sum_wwub = self.get_intersect_matrix(qwu_embed_norm, dwb_embed_norm)
log_pooling_sum_wwbu = self.get_intersect_matrix(qwb_embed_norm, dwu_embed_norm)
log_pooling_sum_wwtu = self.get_intersect_matrix(qwt_embed_norm, dwu_embed_norm)
log_pooling_sum_wwbb = self.get_intersect_matrix(qwb_embed_norm, dwb_embed_norm)
log_pooling_sum_wwbt = self.get_intersect_matrix(qwb_embed_norm, dwt_embed_norm)
log_pooling_sum_wwtb = self.get_intersect_matrix(qwt_embed_norm, dwb_embed_norm)
log_pooling_sum_wwtt = self.get_intersect_matrix(qwt_embed_norm, dwt_embed_norm)
log_pooling_sum = torch.cat(
[log_pooling_sum_wwuu, log_pooling_sum_wwut, log_pooling_sum_wwub, log_pooling_sum_wwbu,
log_pooling_sum_wwtu,log_pooling_sum_wwbb, log_pooling_sum_wwbt, log_pooling_sum_wwtb, log_pooling_sum_wwtt], 1)
#output = torch.squeeze(F.tanh(self.dense_f(log_pooling_sum)), 1)
return log_pooling_sum
class JointModel(nn.Module):
def __init__(self, args):
""""Constructor of the class."""
super(JointModel, self).__init__()
self.output = nn.Linear(75, 1, 1)
self.convknrm = CONVKNRM(args).to(args.device)
self.arci = ARCI(args).to(args.device)
self.args=args
self.fc1 = nn.Linear(100, 10)
self.fc2 = nn.Linear(45, 20)
def forward(self, batch_queries_w, batch_docs_w,batch_queries_wn,batch_docs_wn,batch_queries_t,batch_docs_t, batch_semantic):
outputs_w = self.convknrm(batch_queries_w, batch_docs_w, batch_semantic).to(self.args.device)
outputs_wn = self.convknrm(batch_queries_wn, batch_docs_wn, batch_semantic).to(self.args.device)
outputs_t = self.arci(batch_queries_t, batch_docs_t).to(self.args.device)
outputs_t = self.fc1(outputs_t)
outputs_wn = self.fc2(outputs_wn)
feat=torch.cat([outputs_w,outputs_wn,outputs_t],dim=1)
scores=self.output(feat)
scores=torch.squeeze(scores,dim=1)
return scores
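# Example wiring (a sketch; every `args` attribute mirrors a field read above,
# and the hyper-parameter values are purely illustrative):
# from argparse import Namespace
# args = Namespace(device="cpu", emsize=100, nbins=5, mu=[...], sigma=[...],
#                  wv=wv, index_to_word=index_to_word, max_query_len=10,
#                  filters_1d=[64], kernel_size_1d=[3], maxpool_size_1d=[2])
# model = JointModel(args)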
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/utils/__init__.py | """
@author: <NAME>
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/query/__init__.py | """
Query
=====
This is the query package.
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/eval/trec_eval.py | """
Trec Evaluation
===============
Wrapper for trec_eval.
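Example usage (file paths are illustrative)::

    te = TrecEval()
    te.evaluate("qrels.txt", "run.txt")
    map_score = te.get_score("101", "map")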
:Authors: <NAME>, <NAME>
"""
from subprocess import Popen, PIPE
from shlex import split
from nordlys.config import TREC_EVAL
class TrecEval(object):
"""Holds evaluation results obtained using trec_eval."""
__TREC_EVAL_FLAGS = "-c -m all_trec -q"
def __init__(self):
self.__results = None # results[query_id][metric] = score
def __eval_proc(self, qrels_file, run_file, eval_file=None):
"""Executes the evaluation process call and optionally saves the output to a file.
:param qrels_file: name of qrels file
:param run_file: name of run file
:param eval_file: name of evaluation output file
"""
cmd_flags = " ".join([TREC_EVAL, self.__TREC_EVAL_FLAGS, qrels_file, run_file])
if eval_file is not None:
# TODO save output to file
pass
p = Popen(split(cmd_flags), stdout=PIPE)
output, err = p.communicate()
output = output.decode('utf8')
return output, err
def load_results(self, eval_file):
"""Loads results from an existing evaluation file.
:param eval_file: name of evaluation file
"""
self.__results = {}
# TODO
pass
def evaluate(self, qrels_file, run_file, eval_file=None):
"""Evaluates a runfile using trec_eval. Optionally writes evaluation output to file.
:param qrels_file: name of qrels file
:param run_file: name of run file
:param eval_file: name of evaluation output file
"""
self.__results = {}
output, _ = self.__eval_proc(qrels_file, run_file, eval_file=eval_file)
for line in output.splitlines():
metric, query_id, score = line.split()
metric = metric.lower()
if query_id == "all": # ignore "all" lines
continue
try:
score = float(score)
except ValueError: # e.g. some bad-formed lines with a "score" like '----------'
continue
query_data = self.__results.get(query_id, {})
query_data[metric] = score
self.__results[query_id] = query_data
def get_query_ids(self):
"""Returns the set of queryIDs for which we have results."""
return self.__results.keys()
def get_score(self, query_id, metric):
"""Returns the score for a given queryID and metric.
:param query_id: queryID
:param metric: metric
:return: score (or None if not found)
"""
return self.__results.get(query_id, {}).get(metric.lower(), None)
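# Minimal usage sketch (hypothetical file names; TREC_EVAL in nordlys.config
# must point to a compiled trec_eval binary):
# te = TrecEval()
# te.evaluate("data/qrels.txt", "runs/bm25.run")
# print(te.get_score("301", "map"))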
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/el/ltr.py | <gh_stars>10-100
"""
LTR Entity Linking Approach
===========================
Class for Learning-to-Rank entity linking approach
:Author: <NAME>
"""
import csv
import json
from nordlys.config import PLOGGER, ELASTIC_INDICES
from nordlys.core.ml.instance import Instance
from nordlys.core.ml.instances import Instances
from nordlys.core.ml.ml import ML
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.logic.el.el_utils import is_name_entity
from nordlys.logic.el.greedy import Greedy
from nordlys.logic.entity.entity import Entity
from nordlys.logic.features.feature_cache import FeatureCache
from nordlys.logic.features.ftr_entity import FtrEntity
from nordlys.logic.features.ftr_entity_similarity import FtrEntitySimilarity
from nordlys.logic.features.ftr_mention import FtrMention
from nordlys.logic.features.ftr_entity_mention import FtrEntityMention
from nordlys.logic.query.mention import Mention
from nordlys.logic.query.query import Query
class LTR(object):
def __init__(self, query, entity, elastic, fcache, model=None, threshold=None, cmns_th=0.1):
self.__query = query
self.__entity = entity
self.__elastic = elastic
self.__fcache = fcache
self.__model = model
self.__threshold = threshold
self.__cmns_th = cmns_th
# =========================
# Code related to training
# =========================
@staticmethod
def __check_config(config):
"""Checks config parameters and set default values."""
must_have = ["model_file", "training_set", "ground_truth", "query_file"]
try:
for i in range(2):
if must_have[i] not in config:
raise Exception(must_have[i] + " is not defined!")
if config.get("gen_training_set", False):
for i in range(2, 4):
if must_have[i] not in config:
raise Exception(must_have[i] + " is not defined!")
except Exception as e:
PLOGGER.error("Error in config file: %s", e)
exit(1)
@staticmethod
def train(config):
LTR.__check_config(config)
if config.get("gen_training_set", False):
gt = LTR.load_yerd(config["ground_truth"])
LTR.gen_train_set(gt, config["query_file"], config["training_set"])
instances = Instances.from_json(config["training_set"])
ML(config).train_model(instances)
@staticmethod
def load_yerd(gt_file):
"""
Reads the Y-ERD collection and returns a dictionary.
:param gt_file: Path to the Y-ERD collection
:return: dictionary {(qid, query, en_id, mention) ...}
"""
PLOGGER.info("Loading the ground truth ...")
gt = set()
with open(gt_file, "r") as tsvfile:
reader = csv.DictReader(tsvfile, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in reader:
if line["entity"] == "":
continue
query = Query(line["query"]).query
mention = Query(line["mention"]).query
gt.add((line["qid"], query, line["entity"], mention))
return gt
@staticmethod
def gen_train_set(gt, query_file, train_set):
"""Trains LTR model for entity linking."""
entity, elastic, fcache = Entity(), ElasticCache(ELASTIC_INDICES[0]), FeatureCache()
inss = Instances()
positive_annots = set()
# Adds groundtruth instances (positive instances)
PLOGGER.info("Adding groundtruth instances (positive instances) ....")
for item in sorted(gt): # qid, query, en_id, mention
ltr = LTR(Query(item[1], item[0]), entity, elastic, fcache)
ins = ltr.__gen_raw_ins(item[2], item[3])
ins.features = ltr.get_features(ins)
ins.target = 1
inss.add_instance(ins)
positive_annots.add((item[0], item[2]))
# Adds all other instances
PLOGGER.info("Adding all other instances (negative instances) ...")
for qid, q in sorted(json.load(open(query_file, "r")).items()):
PLOGGER.info("Query [" + qid + "]")
ltr = LTR(Query(q, qid), entity, elastic, fcache)
q_inss = ltr.get_candidate_inss()
for ins in q_inss.get_all():
if (qid, ins.get_property("en_id")) in positive_annots:
continue
ins.target = 0
inss.add_instance(ins)
inss.to_json(train_set)
# =========================
# Code related to Linking
# =========================
def link(self):
"""Links the query to the entity.
:return: dictionary [{"mention": xx, "entity": yy, "score": zz}, ...]
"""
inss = self.rank_ens()
linked_ens = self.disambiguate(inss)
return linked_ens
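# Pipeline sketch (illustrative values): get_candidate_inss() expands every
# query n-gram into candidate entities via commonness, rank_ens() scores the
# candidates with the learned LTR model, and disambiguate() greedily resolves
# overlapping mentions. Each returned item looks like:
# {"mention": "new york", "entity": "<dbpedia:New_York_City>", "score": 0.87}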
def get_candidate_inss(self):
"""Detects mentions and their candidate entities (with their commoness scores) and generates instances
:return: Instances object
"""
instances = Instances()
for ngram in self.__query.get_ngrams():
cand_ens = Mention(ngram, self.__entity, self.__cmns_th).get_cand_ens()
for en_id, commonness in cand_ens.items():
if not is_name_entity(en_id):
continue
self.__fcache.set_feature_val("commonness", en_id + "_" + ngram, commonness)
ins = self.__gen_raw_ins(en_id, ngram)
ins.features = self.get_features(ins, cand_ens)
instances.add_instance(ins)
return instances
def rank_ens(self):
"""Ranks instances according to the learned LTR model
:param n: length of n-gram
:return: dictionary {(dbp_uri, fb_id):commonness, ..}
"""
if self.__model is None:
PLOGGER.error("LTR model is not defined.")
inss = self.get_candidate_inss()
ML({}).apply_model(inss, self.__model)
return inss
def disambiguate(self, inss):
"""Performs disambiguation"""
greedy = Greedy(self.__threshold)
inter_sets = greedy.disambiguate(inss)
uniq_men_en = {}
for iset in inter_sets:
for men, (en_id, score) in iset.items():
uniq_men_en[(men, en_id)] = score
linked_ens = []
for men_en, score in uniq_men_en.items():
linked_ens.append({"mention": men_en[0], "entity": men_en[1], "score": score})
return linked_ens
def get_features(self, ins, cand_ens=None):
"""Generates the features set for each instance.
:param ins: instance object
:param cand_ens: dictionary of candidate entities {en_id: cmns, ...}
:return: dictionary of features {ftr_name: value, ...}
"""
e = ins.get_property("en_id")
m = ins.get_property("mention")
q = ins.get_property("query")
features = {}
# --- entity features ---
ftr_entity = FtrEntity(e, self.__entity)
features["outlinks"] = self.__fcache.get_feature_val("outlinks", e, ftr_entity.outlinks)
features["redirects"] = self.__fcache.get_feature_val("redirects", e, ftr_entity.redirects)
# --- mention features ---
ftr_mention = FtrMention(m, self.__entity, cand_ens)
features["len_ratio"] = ftr_mention.len_ratio(q)
features["len"] = ftr_mention.mention_len()
features["matches"] = self.__fcache.get_feature_val("matches", m, ftr_mention.matches)
# --- mention-entity features ---
ftr_entity_mention = FtrEntityMention(e, m, self.__entity)
key = e + "_" + m
features["commonness"] = self.__fcache.get_feature_val("commonness", key, ftr_entity_mention.commonness)
features["mct"] = ftr_entity_mention.mct()
features["tcm"] = ftr_entity_mention.tcm()
features["tem"] = ftr_entity_mention.tem()
features["pos1"] = ftr_entity_mention.pos1()
ftr_sim_mention = FtrEntitySimilarity(m, e, self.__elastic)
features["sim_m"] = self.__fcache.get_feature_val("sim", key, ftr_sim_mention.lm_score)
# --- entity-query features ---
ftr_entity_query = FtrEntityMention(e, q, self.__entity)
features["qct"] = ftr_entity_query.mct()
features["tcq"] = ftr_entity_query.tcm()
features["teq"] = ftr_entity_query.tem()
key = e + "_" + q
ftr_sim_query = FtrEntitySimilarity(q, e, self.__elastic)
features["sim_q"] = self.__fcache.get_feature_val("sim", key, ftr_sim_query.lm_score)
features["context_sim"] = self.__fcache.get_feature_val("context_sim", key, ftr_sim_query.context_sim, m)
return features
def __gen_raw_ins(self, en_id, mention):
"""Generates an instance without features"""
ins_id = self.__query.qid + "_" + en_id + "_" + mention
index = self.__query.qid.rfind("_")
session = self.__query.qid[:index] if index != -1 else self.__query.qid
ins = Instance(ins_id)
ins.add_property("qid", self.__query.qid)
ins.add_property("query", self.__query.query)
ins.add_property("en_id", en_id)
ins.add_property("mention", mention)
ins.add_property("session", session)
return ins |
viosey/flask-mongo | app.py | <reponame>viosey/flask-mongo<filename>app.py
from flask import Flask, render_template, request, url_for, redirect, session
import pymongo
import bcrypt
app = Flask(__name__)
app.secret_key = "testing"
client = pymongo.MongoClient("mongodb://localhost:27017/")
db = client.database
records = db.collection
@app.route('/')
def index():
if "email" in session:
email = session["email"]
return render_template('index.html', email=email)
else:
return redirect(url_for("login"))
@app.route("/signup", methods=['post', 'get'])
def signup():
if "email" in session:
return redirect(url_for("index"))
if request.method == "POST":
email = request.form.get("email")
username = request.form.get("username")
password = request.form.get("password")
#if found in database showcase that it's found
user_found = records.find_one({"username": username})
email_found = records.find_one({"email": email})
if user_found:
message = 'There already is a user by that name'
return render_template('signup.html', message=message)
if email_found:
message = 'This email already exists in database'
return render_template('signup.html', message=message)
else:
#hash the password and encode it
hashpass = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
records.insert_one({'username': username, 'email': email, 'password': hashpass})
session['email'] = email
return redirect(url_for("index"))
return render_template('signup.html')
@app.route("/login", methods=["POST", "GET"])
def login():
message = 'Please login to your account'
if "email" in session:
return redirect(url_for("index"))
if request.method == "POST":
email = request.form.get("email")
password = request.form.get("password")
#check if email exists in database
email_found = records.find_one({"email": email})
if email_found:
email_val = email_found['email']
passwordcheck = email_found['password']
#encode the password and check if it matches
if bcrypt.checkpw(password.encode('utf-8'), passwordcheck):
session["email"] = email_val
return redirect(url_for('index'))
else:
if "email" in session:
return redirect(url_for("index"))
message = 'Wrong password'
return render_template('login.html', message=message)
else:
message = 'Email not found'
return render_template('login.html', message=message)
return render_template('login.html', message=message)
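# bcrypt round-trip, for reference (illustrative snippet, not part of the routes):
# hashed = bcrypt.hashpw("secret".encode('utf-8'), bcrypt.gensalt())
# assert bcrypt.checkpw("secret".encode('utf-8'), hashed)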
@app.route("/signout", methods=["POST", "GET"])
def signout():
if "email" in session:
session.pop("email", None)
message = 'You are signed out!'
return render_template('index.html', message=message)
else:
return redirect(url_for("index"))
#end of code to run it
if __name__ == "__main__":
app.run(debug=True) |
omelinb/SQLConnector | connector.py | <filename>connector.py
#!/usr/bin/env python3
from PyQt5.QtCore import QAbstractTableModel, QVariant, Qt, QModelIndex
from PyQt5.QtWidgets import (QWidget, QPushButton, QPlainTextEdit, QLineEdit,
QMessageBox, QApplication, QDesktopWidget,
QTableView, QComboBox, QGridLayout, QLabel)
import psycopg2
import logging
import sqlite3
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class Connector:
def __init__(self, connstring, db_type):
"""
Args:
connstring: a string
db_type: a string
"""
self.connstring = connstring
self.connector = db_type
self.connect = None
def execute(self, query):
self.connect = self.connector.connect(self.connstring)
self.cursor = self.connect.cursor()
self.cursor.execute(query)
self.connect.commit()
def get_headers(self):
if self.cursor.description:
return [col[0] for col in self.cursor.description]
def get_data(self):
return self.cursor.fetchmany(size=1000)
def __del__(self):
if self.connect:
self.connect.close()
class ResultTableModel(QAbstractTableModel):
ROWS_COUNT = 25
def __init__(self, data, headers, datasource=None, parent=None):
"""
Args:
data: a list of lists
headers: a list of strings
"""
super().__init__(parent)
self.data = data
self.headers = headers
self.rowsLoaded = ResultTableModel.ROWS_COUNT
self.datasource = datasource
def rowCount(self, parent):
if len(self.data) <= self.rowsLoaded:
return len(self.data)
return self.rowsLoaded
def columnCount(self, parent):
return len(self.headers)
def canFetchMore(self, index=QModelIndex()):
if len(self.data) > self.rowsLoaded:
return True
if self.datasource:
self.addRecords()
return False
def addRecords(self):
self.beginResetModel()
self.data += self.datasource.get_data()
self.endResetModel()
def fetchMore(self, index=QModelIndex()):
remainder = len(self.data) - self.rowsLoaded
itemsToFetch = min(remainder, ResultTableModel.ROWS_COUNT)
self.beginInsertRows(QModelIndex(), self.rowsLoaded, self.rowsLoaded + itemsToFetch - 1)
self.rowsLoaded += itemsToFetch
self.endInsertRows()
def data(self, index, role):
if not index.isValid():
return QVariant()
elif role != Qt.DisplayRole:
return QVariant()
return QVariant(self.data[index.row()][index.column()])
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return QVariant(self.headers[col])
return QVariant()
class MainWidget(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.resize(700, 500)
self.center()
self.setWindowTitle('SQL Connector')
grid = QGridLayout()
grid.setSpacing(20)
self.connStringLabel = QLabel('Connection', self)
self.queryFieldLabel = QLabel('Query field', self)
self.resultTableLabel = QLabel('Result', self)
self.connString = QLineEdit(':memory:', self)
self.typesList = QComboBox(self)
self.typesList.addItem('sqlite3')
self.typesList.addItem('postgres')
self.queryField = QPlainTextEdit(self)
self.resultTable = QTableView(self)
self.launchButton = QPushButton('Launch', self)
grid.addWidget(self.connStringLabel, 0, 0)
grid.addWidget(self.connString, 0, 1, 1, 4)
grid.addWidget(self.typesList, 0, 5)
grid.addWidget(self.queryFieldLabel, 1, 0)
grid.addWidget(self.queryField, 1, 1, 2, 4)
grid.addWidget(self.resultTableLabel, 4, 0)
grid.addWidget(self.resultTable, 4, 1, 2, 4)
grid.addWidget(self.launchButton, 7, 5)
self.launchButton.clicked.connect(self.on_click)
self.setLayout(grid)
def show_message(self, message, title='Message'):
messageBox = QMessageBox()
messageBox.setText(message)
messageBox.setWindowTitle(title)
messageBox.exec_()
def on_click(self):
connString = self.connString.text()
query = self.queryField.toPlainText()
dbType = self.typesList.currentText()
if not connString:
connString = ':memory:'
if dbType == 'sqlite3' and connString != ':memory:':
if not connString.startswith('/'):
connString = os.path.join(BASE_DIR, connString)
if not os.path.isfile(connString):
self.show_message('There is no such database file.')
return
connector = get_connector(dbType, connString)
try:
connector.execute(query)
data, headers = connector.get_data(), connector.get_headers()
if not headers:
self.show_message('There is no result for your query.')
return
model = ResultTableModel(data, headers, connector)
self.resultTable.setModel(model)
except (psycopg2.ProgrammingError, psycopg2.OperationalError, sqlite3.OperationalError) as e:
self.show_message(str(e)[:1].capitalize() + str(e)[1:], 'Error!')
logging.error(e)
def center(self):
frameGeometry = self.frameGeometry()
centerPoint = QDesktopWidget().availableGeometry().center()
frameGeometry.moveCenter(centerPoint)
self.move(frameGeometry.topLeft())
def get_connector(dbType, connstring):
dbTypes = {
'sqlite3': sqlite3,
'postgres': psycopg2}
return Connector(connstring, dbTypes[dbType])
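# Minimal usage sketch (in-memory SQLite; illustrative only):
# c = get_connector('sqlite3', ':memory:')
# c.execute('SELECT 1 AS one')
# print(c.get_headers())  # ['one']
# print(c.get_data())     # [(1,)]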
def main():
formatstring = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(
filename='connector.log', level=logging.INFO, format=formatstring)
app = QApplication(sys.argv)
mainWidget = MainWidget()
mainWidget.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
Karpova-Lab/syringe-pump | doc_src/source/pyControl_files/_syringepump.py | <reponame>Karpova-Lab/syringe-pump<filename>doc_src/source/pyControl_files/_syringepump.py
import pyControl.hardware as _h
from machine import UART
class Syringe_pump():
def __init__(self, port):
assert port.UART is not None, '! Pump needs port with UART.'
self.uart = UART(port.UART, 9600)
self.uart.init(9600, bits=8, parity=None, stop=1, timeout=1)
self.uart.write('C')
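# Single-character serial protocol, as inferred from the methods below:
# 'C' is sent once at startup (handshake), 'I,<n>' infuses n units,
# 'Z' resets the volume counter, and 'R' retracts the plunger.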
def infuse(self,val):
self.uart.write('I,{}\n'.format(val))
def check_for_serial(self):
if self.uart.any():
return self.uart.readline().decode("utf-8").strip('\n')
else:
return None
def reset_volume(self):
self.uart.write('Z')
def retract(self):
self.uart.write('R') |
Karpova-Lab/syringe-pump | doc_src/source/BOMs/tableGenerator.py | <gh_stars>10-100
import pandas as pd
from sys import argv
import pyperclip
script,xlsFile = argv
def getColWidths(table):
colMax = []
for cols in table:
lengths = []
for entry in table[cols]:
lengths.append(len(entry))
colMax.append(max(max(lengths), len(cols)))
return colMax
def printDivider(colWidths,emptyVec=[],doubleDash = False):
dash = "=" if doubleDash else "-"
index = 0
dividerString = ""
for width in colWidths:
insert = dash
if emptyVec:
if emptyVec[index]:
insert = " "
dividerString += "{0}{1}".format("+",(width+2)*insert)
index += 1
dividerString += "+\n"
return dividerString
def printContents(table,colWidths,isHeader=False):
contentString = "| "
index = 0
blankCols = []
for cols in table:
content = cols if isHeader else table.loc[row,cols]
if index == 0 and isHeader == False: # would strip a trailing ".0" if values were read in as numbers
# content = content[:-2]
pass
if content == "":
blankCols.append(True)
else:
blankCols.append(False)
fill = colWidths[index]-len(content)
contentString += "{0}{1}{2}".format(content,(fill)*" "," | ")
index +=1
contentString += "\n"
return contentString,blankCols
table = pd.read_excel(xlsFile,usecols=[0,1,2,3],dtype='str')#read in table from excel file
table.fillna("",inplace=True)
widths = getColWidths(table) # get the maximum content width of each column
#print header
outputString = printDivider(widths) #first divider
outputString += printContents(table,widths,isHeader=True)[0] #header
outputString += printDivider(widths,doubleDash=True) #header divider
#print table contents
for row in range(len(table)):
valString, blanks = printContents(table, widths)
if row != 0: # no divider before the first data row; the header divider is already in place
outputString += printDivider(widths, blanks)
outputString += valString
outputString += printDivider(widths) #final divider
#copy string to clipboard
pyperclip.copy(outputString)
print ("Table copied to clipboard!")
|
Karpova-Lab/syringe-pump | doc_src/source/images/makeGIF.py | <reponame>Karpova-Lab/syringe-pump
from moviepy.editor import *
from sys import argv
script, videoFile = argv
# clip = (VideoFileClip(videoFile)).fadeout(1,final_color=[255,255,255])
clip = VideoFileClip(videoFile)
clip_resized = clip.resize(height=550)
clip_resized.write_gif(videoFile[:-3] + "gif", fps=20)
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/utils.py | """ Utilities for the Python SDK of the YooniK BiometricInThings API.
"""
import yk_utils.apis
class Key:
"""Manage YooniK BiometricInThings API Subscription Key."""
@classmethod
def set(cls, key: str):
"""Set the Subscription Key.
:param key:
:return:
"""
yk_utils.apis.Key.set(key)
class BaseUrl:
"""Manage YooniK BiometricInThings API Base URL."""
@classmethod
def set(cls, base_url: str):
yk_utils.apis.BaseUrl.set(base_url)
|
jppdpf/YK-BiT-SDK-Python-1 | sample/run_bit_sample.py | import base64
from os import getenv
from yk_bit import BaseUrl, Key, capture, verify_images, verify, status, setup, BiTStatus
"""
First time running:
- Run the BiT App
- Run this SDK App
- If the setup was successful:
- Comment the "setup" block
"""
# BiometricInThings API Environment Variables
EV_BASE_URL = getenv('YK_BIT_BASE_URL')
EV_API_KEY = getenv('YK_BIT_X_API_KEY')
BaseUrl.set(EV_BASE_URL)
Key.set(EV_API_KEY)
def base64_to_file(filename: str, data: str):
with open(f"{filename}.png", "wb") as fh:
fh.write(base64.decodebytes(data.encode('utf-8')))
if __name__ == "__main__":
# Setup
try:
print("Setting up BiT")
setup()
print(f"BiT Setup Successful. \n")
except Exception as ex:
print(f"BiT Setup unsuccessful. \n")
print(ex)
exit()
# Status
bit_availability = status()
print(f"BiT Availability: {bit_availability} \n")
if bit_availability == BiTStatus.Available:
# Capture
captured = capture(capture_timeout=10)
print(f"Capture: \n"
f"\t Status: {captured.capture_status} \n"
# f"\t Image: {captured.image} \n"
)
if captured.image is not None:
base64_to_file("captured", captured.image)
# Verify
verified = verify(reference_image=captured.image, capture_time_out=10, anti_spoofing=True)
print(f"Verify: \n"
f"\t Matching Score: {verified.matching_score} \n"
f"\t Status: {verified.verify_status} \n"
# f"\t Verified Image: {verified.verified_image} \n"
)
base64_to_file("verified", verified.verified_image)
# Verify Images
verified_images = verify_images(probe_image=captured.image, reference_image=verified.verified_image)
print(f"Verify Images: \n"
f"\t Matching Score: {verified_images.matching_score} \n"
f"\t Status: {verified_images.verify_images_status} \n")
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/verify_images_response.py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import Model
from yk_utils.models import deserialization
class VerifyImagesResponse(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, matching_score: float=None, verify_images_status: List[str]=None): # noqa: E501
"""VerifyImagesResponse - a model defined in Swagger
:param matching_score: The matching_score of this VerifyImagesResponse. # noqa: E501
:type matching_score: float
:param verify_images_status: The verify_images_status of this VerifyImagesResponse. # noqa: E501
:type verify_images_status: List[str]
"""
self.swagger_types = {
'matching_score': float,
'verify_images_status': List[str]
}
self.attribute_map = {
'matching_score': 'matching_score',
'verify_images_status': 'verify_images_status'
}
self._matching_score = matching_score
self._verify_images_status = verify_images_status
@classmethod
def from_dict(cls, dikt) -> 'VerifyImagesResponse':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The verify_images_response of this VerifyImagesResponse. # noqa: E501
:rtype: VerifyImagesResponse
"""
return deserialization.deserialize_model(dikt, cls)
@property
def matching_score(self) -> float:
"""Gets the matching_score of this VerifyImagesResponse.
Face matching confidence. Varies between -1 (totally different) to 1 (totally equal). # noqa: E501
:return: The matching_score of this VerifyImagesResponse.
:rtype: float
"""
return self._matching_score
@matching_score.setter
def matching_score(self, matching_score: float):
"""Sets the matching_score of this VerifyImagesResponse.
Face matching confidence. Varies between -1 (totally different) to 1 (totally equal). # noqa: E501
:param matching_score: The matching_score of this VerifyImagesResponse.
:type matching_score: float
"""
self._matching_score = matching_score
@property
def verify_images_status(self) -> List[str]:
"""Gets the verify_images_status of this VerifyImagesResponse.
Face matching status # noqa: E501
:return: The verify_images_status of this VerifyImagesResponse.
:rtype: List[str]
"""
return self._verify_images_status
@verify_images_status.setter
def verify_images_status(self, verify_images_status: List[str]):
"""Sets the verify_images_status of this VerifyImagesResponse.
Face matching status # noqa: E501
:param verify_images_status: The verify_images_status of this VerifyImagesResponse.
:type verify_images_status: List[str]
"""
allowed_values = ["matching_successful", "matching_failed", "reference_face_detection_failed", "probe_face_detection_failed"] # noqa: E501
if not set(verify_images_status).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `verify_images_status` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(verify_images_status) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._verify_images_status = verify_images_status
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/__init__.py | # coding: utf-8
# flake8: noqa
"""
YooniK.Bit API
Functionalities to control biometric capture or verification of the YooniK.BiT. # noqa: E501
OpenAPI spec version: 1.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from yk_bit.models.capture_request import CaptureRequest
from yk_bit.models.capture_response import CaptureResponse
from yk_bit.models.verify_request import VerifyRequest
from yk_bit.models.verify_response import VerifyResponse
from yk_bit.models.verify_images_request import VerifyImagesRequest
from yk_bit.models.verify_images_response import VerifyImagesResponse
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/verify_images_request.py | <reponame>jppdpf/YK-BiT-SDK-Python-1<gh_stars>0
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import Model
from yk_utils.models import deserialization
class VerifyImagesRequest(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, probe_image: str=None, reference_image: str=None, matching_score_threshold: float=0.4): # noqa: E501
"""VerifyImagesRequest - a model defined in Swagger
:param probe_image: The probe_image of this VerifyImagesRequest. # noqa: E501
:type probe_image: str
:param reference_image: The reference_image of this VerifyImagesRequest. # noqa: E501
:type reference_image: str
:param matching_score_threshold: The matching_score_threshold of this VerifyImagesRequest. # noqa: E501
:type matching_score_threshold: float
"""
self.swagger_types = {
'probe_image': str,
'reference_image': str,
'matching_score_threshold': float
}
self.attribute_map = {
'probe_image': 'probe_image',
'reference_image': 'reference_image',
'matching_score_threshold': 'matching_score_threshold'
}
self._probe_image = probe_image
self._reference_image = reference_image
self._matching_score_threshold = matching_score_threshold
@classmethod
def from_dict(cls, dikt) -> 'VerifyImagesRequest':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The verify_images_request of this VerifyImagesRequest. # noqa: E501
:rtype: VerifyImagesRequest
"""
return deserialization.deserialize_model(dikt, cls)
@property
def probe_image(self) -> str:
"""Gets the probe_image of this VerifyImagesRequest.
JPG base 64 string face image to be matched against reference image. # noqa: E501
:return: The probe_image of this VerifyImagesRequest.
:rtype: str
"""
return self._probe_image
@probe_image.setter
def probe_image(self, probe_image: str):
"""Sets the probe_image of this VerifyImagesRequest.
JPG base 64 string face image to be matched against reference image. # noqa: E501
:param probe_image: The probe_image of this VerifyImagesRequest.
:type probe_image: str
"""
if probe_image is None:
raise ValueError("Invalid value for `probe_image`, must not be `None`") # noqa: E501
self._probe_image = probe_image
@property
def reference_image(self) -> str:
"""Gets the reference_image of this VerifyImagesRequest.
JPG base 64 string reference face image. # noqa: E501
:return: The reference_image of this VerifyImagesRequest.
:rtype: str
"""
return self._reference_image
@reference_image.setter
def reference_image(self, reference_image: str):
"""Sets the reference_image of this VerifyImagesRequest.
JPG base 64 string reference face image. # noqa: E501
:param reference_image: The reference_image of this VerifyImagesRequest.
:type reference_image: str
"""
if reference_image is None:
raise ValueError("Invalid value for `reference_image`, must not be `None`") # noqa: E501
self._reference_image = reference_image
@property
def matching_score_threshold(self) -> float:
"""Gets the matching_score_threshold of this VerifyImagesRequest.
Matching score threshold used to verify matching between probe and reference image. # noqa: E501
:return: The matching_score_threshold of this VerifyImagesRequest.
:rtype: float
"""
return self._matching_score_threshold
@matching_score_threshold.setter
def matching_score_threshold(self, matching_score_threshold: float):
"""Sets the matching_score_threshold of this VerifyImagesRequest.
Matching score threshold used to verify matching between probe and reference image. # noqa: E501
:param matching_score_threshold: The matching_score_threshold of this VerifyImagesRequest.
:type matching_score_threshold: float
"""
self._matching_score_threshold = matching_score_threshold
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/capture_request.py | <filename>yk_bit/models/capture_request.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import Model
from yk_utils.models import deserialization
class CaptureRequest(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, capture_time_out: float=10.0, anti_spoofing: bool=True, live_quality_analysis: bool=False): # noqa: E501
"""CaptureRequest - a model defined in Swagger
:param capture_time_out: The capture_time_out of this CaptureRequest. # noqa: E501
:type capture_time_out: float
:param anti_spoofing: The anti_spoofing of this CaptureRequest. # noqa: E501
:type anti_spoofing: bool
:param live_quality_analysis: The live_quality_analysis of this CaptureRequest. # noqa: E501
:type live_quality_analysis: bool
"""
self.swagger_types = {
'capture_time_out': float,
'anti_spoofing': bool,
'live_quality_analysis': bool
}
self.attribute_map = {
'capture_time_out': 'capture_time_out',
'anti_spoofing': 'anti_spoofing',
'live_quality_analysis': 'live_quality_analysis'
}
self._capture_time_out = capture_time_out
self._anti_spoofing = anti_spoofing
self._live_quality_analysis = live_quality_analysis
@classmethod
def from_dict(cls, dikt) -> 'CaptureRequest':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The capture_request of this CaptureRequest. # noqa: E501
:rtype: CaptureRequest
"""
return deserialization.deserialize_model(dikt, cls)
@property
def capture_time_out(self) -> float:
"""Gets the capture_time_out of this CaptureRequest.
Capture timeout in seconds. # noqa: E501
:return: The capture_time_out of this CaptureRequest.
:rtype: float
"""
return self._capture_time_out
@capture_time_out.setter
def capture_time_out(self, capture_time_out: float):
"""Sets the capture_time_out of this CaptureRequest.
Capture timeout in seconds. # noqa: E501
:param capture_time_out: The capture_time_out of this CaptureRequest.
:type capture_time_out: float
"""
if capture_time_out is not None and capture_time_out < 0: # noqa: E501
raise ValueError("Invalid value for `capture_time_out`, must be a value greater than or equal to `0`") # noqa: E501
self._capture_time_out = capture_time_out
@property
def anti_spoofing(self) -> bool:
"""Gets the anti_spoofing of this CaptureRequest.
Activate anti-spoofing detection. # noqa: E501
:return: The anti_spoofing of this CaptureRequest.
:rtype: bool
"""
return self._anti_spoofing
@anti_spoofing.setter
def anti_spoofing(self, anti_spoofing: bool):
"""Sets the anti_spoofing of this CaptureRequest.
Activate anti-spoofing detection. # noqa: E501
:param anti_spoofing: The anti_spoofing of this CaptureRequest.
:type anti_spoofing: bool
"""
self._anti_spoofing = anti_spoofing
@property
def live_quality_analysis(self) -> bool:
"""Gets the live_quality_analysis of this CaptureRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images. # noqa: E501
:return: The live_quality_analysis of this CaptureRequest.
:rtype: bool
"""
return self._live_quality_analysis
@live_quality_analysis.setter
def live_quality_analysis(self, live_quality_analysis: bool):
"""Sets the live_quality_analysis of this CaptureRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images. # noqa: E501
:param live_quality_analysis: The live_quality_analysis of this CaptureRequest.
:type live_quality_analysis: bool
"""
self._live_quality_analysis = live_quality_analysis
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/verify_request.py | <gh_stars>0
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import Model
from yk_utils.models import deserialization
class VerifyRequest(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, capture_time_out: float=10.0, reference_image: str=None, matching_score_threshold: float=0.4, anti_spoofing: bool=True, live_quality_analysis: bool=False, reference_quality_analysis: bool=False): # noqa: E501
"""VerifyRequest - a model defined in Swagger
:param capture_time_out: The capture_time_out of this VerifyRequest. # noqa: E501
:type capture_time_out: float
:param reference_image: The reference_image of this VerifyRequest. # noqa: E501
:type reference_image: str
:param matching_score_threshold: The matching_score_threshold of this VerifyRequest. # noqa: E501
:type matching_score_threshold: float
:param anti_spoofing: The anti_spoofing of this VerifyRequest. # noqa: E501
:type anti_spoofing: bool
:param live_quality_analysis: The live_quality_analysis of this VerifyRequest. # noqa: E501
:type live_quality_analysis: bool
:param reference_quality_analysis: The reference_quality_analysis of this VerifyRequest. # noqa: E501
:type reference_quality_analysis: bool
"""
self.swagger_types = {
'capture_time_out': float,
'reference_image': str,
'matching_score_threshold': float,
'anti_spoofing': bool,
'live_quality_analysis': bool,
'reference_quality_analysis': bool
}
self.attribute_map = {
'capture_time_out': 'capture_time_out',
'reference_image': 'reference_image',
'matching_score_threshold': 'matching_score_threshold',
'anti_spoofing': 'anti_spoofing',
'live_quality_analysis': 'live_quality_analysis',
'reference_quality_analysis': 'reference_quality_analysis'
}
self._capture_time_out = capture_time_out
self._reference_image = reference_image
self._matching_score_threshold = matching_score_threshold
self._anti_spoofing = anti_spoofing
self._live_quality_analysis = live_quality_analysis
self._reference_quality_analysis = reference_quality_analysis
@classmethod
def from_dict(cls, dikt) -> 'VerifyRequest':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The verify_request of this VerifyRequest. # noqa: E501
:rtype: VerifyRequest
"""
return deserialization.deserialize_model(dikt, cls)
@property
def capture_time_out(self) -> float:
"""Gets the capture_time_out of this VerifyRequest.
Capture timeout in seconds. # noqa: E501
:return: The capture_time_out of this VerifyRequest.
:rtype: float
"""
return self._capture_time_out
@capture_time_out.setter
def capture_time_out(self, capture_time_out: float):
"""Sets the capture_time_out of this VerifyRequest.
Capture timeout in seconds. # noqa: E501
:param capture_time_out: The capture_time_out of this VerifyRequest.
:type capture_time_out: float
"""
self._capture_time_out = capture_time_out
@property
def reference_image(self) -> str:
"""Gets the reference_image of this VerifyRequest.
JPG base 64 string reference face image obtained from identification document or any other source. # noqa: E501
:return: The reference_image of this VerifyRequest.
:rtype: str
"""
return self._reference_image
@reference_image.setter
def reference_image(self, reference_image: str):
"""Sets the reference_image of this VerifyRequest.
JPG base 64 string reference face image obtained from identification document or any other source. # noqa: E501
:param reference_image: The reference_image of this VerifyRequest.
:type reference_image: str
"""
self._reference_image = reference_image
@property
def matching_score_threshold(self) -> float:
"""Gets the matching_score_threshold of this VerifyRequest.
Matching score threshold used to verify matching between live and reference image. # noqa: E501
:return: The matching_score_threshold of this VerifyRequest.
:rtype: float
"""
return self._matching_score_threshold
@matching_score_threshold.setter
def matching_score_threshold(self, matching_score_threshold: float):
"""Sets the matching_score_threshold of this VerifyRequest.
Matching score threshold used to verify matching between live and reference image. # noqa: E501
:param matching_score_threshold: The matching_score_threshold of this VerifyRequest.
:type matching_score_threshold: float
"""
self._matching_score_threshold = matching_score_threshold
@property
def anti_spoofing(self) -> bool:
"""Gets the anti_spoofing of this VerifyRequest.
Activate anti-spoofing detection. # noqa: E501
:return: The anti_spoofing of this VerifyRequest.
:rtype: bool
"""
return self._anti_spoofing
@anti_spoofing.setter
def anti_spoofing(self, anti_spoofing: bool):
"""Sets the anti_spoofing of this VerifyRequest.
Activate anti-spoofing detection. # noqa: E501
:param anti_spoofing: The anti_spoofing of this VerifyRequest.
:type anti_spoofing: bool
"""
self._anti_spoofing = anti_spoofing
@property
def live_quality_analysis(self) -> bool:
"""Gets the live_quality_analysis of this VerifyRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images. # noqa: E501
:return: The live_quality_analysis of this VerifyRequest.
:rtype: bool
"""
return self._live_quality_analysis
@live_quality_analysis.setter
def live_quality_analysis(self, live_quality_analysis: bool):
"""Sets the live_quality_analysis of this VerifyRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images. # noqa: E501
:param live_quality_analysis: The live_quality_analysis of this VerifyRequest.
:type live_quality_analysis: bool
"""
self._live_quality_analysis = live_quality_analysis
@property
def reference_quality_analysis(self) -> bool:
"""Gets the reference_quality_analysis of this VerifyRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the reference_image. # noqa: E501
:return: The reference_quality_analysis of this VerifyRequest.
:rtype: bool
"""
return self._reference_quality_analysis
@reference_quality_analysis.setter
def reference_quality_analysis(self, reference_quality_analysis: bool):
"""Sets the reference_quality_analysis of this VerifyRequest.
Activate ISO/ICAO-19794-5 face quality compliance checks on the reference_image. # noqa: E501
:param reference_quality_analysis: The reference_quality_analysis of this VerifyRequest.
:type reference_quality_analysis: bool
"""
self._reference_quality_analysis = reference_quality_analysis
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/verify_response.py | <filename>yk_bit/models/verify_response.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import Model
from yk_utils.models import deserialization
class VerifyResponse(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, matching_score: float=None, verified_image: str=None, verify_status: List[str]=None): # noqa: E501
"""VerifyResponse - a model defined in Swagger
:param matching_score: The matching_score of this VerifyResponse. # noqa: E501
:type matching_score: float
:param verified_image: The verified_image of this VerifyResponse. # noqa: E501
:type verified_image: str
:param verify_status: The verify_status of this VerifyResponse. # noqa: E501
:type verify_status: List[str]
"""
self.swagger_types = {
'matching_score': float,
'verified_image': str,
'verify_status': List[str]
}
self.attribute_map = {
'matching_score': 'matching_score',
'verified_image': 'verified_image',
'verify_status': 'verify_status'
}
self._matching_score = matching_score
self._verified_image = verified_image
self._verify_status = verify_status
@classmethod
def from_dict(cls, dikt) -> 'VerifyResponse':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The verify_response of this VerifyResponse. # noqa: E501
:rtype: VerifyResponse
"""
return deserialization.deserialize_model(dikt, cls)
@property
def matching_score(self) -> float:
"""Gets the matching_score of this VerifyResponse.
Face matching confidence. Varies between -1 (totally different) to 1 (totally equal). # noqa: E501
:return: The matching_score of this VerifyResponse.
:rtype: float
"""
return self._matching_score
@matching_score.setter
def matching_score(self, matching_score: float):
"""Sets the matching_score of this VerifyResponse.
Face matching confidence. Varies between -1 (totally different) to 1 (totally equal). # noqa: E501
:param matching_score: The matching_score of this VerifyResponse.
:type matching_score: float
"""
self._matching_score = matching_score
@property
def verified_image(self) -> str:
"""Gets the verified_image of this VerifyResponse.
JPG base 64 string thumbnail of the live matched image # noqa: E501
:return: The verified_image of this VerifyResponse.
:rtype: str
"""
return self._verified_image
@verified_image.setter
def verified_image(self, verified_image: str):
"""Sets the verified_image of this VerifyResponse.
JPG base 64 string thumbnail of the live matched image # noqa: E501
:param verified_image: The verified_image of this VerifyResponse.
:type verified_image: str
"""
self._verified_image = verified_image
@property
def verify_status(self) -> List[str]:
"""Gets the verify_status of this VerifyResponse.
Face verification status # noqa: E501
:return: The verify_status of this VerifyResponse.
:rtype: List[str]
"""
return self._verify_status
@verify_status.setter
def verify_status(self, verify_status: List[str]):
"""Sets the verify_status of this VerifyResponse.
Face verification status # noqa: E501
:param verify_status: The verify_status of this VerifyResponse.
:type verify_status: List[str]
"""
allowed_values = ["matching_successful", "matching_failed", "capture_timedout", "camera_failed", "reference_face_detection_failed", "live_face_detection_failed", "liveness_detection_failed", "live_face_quality_failed", "reference_face_quality_failed"] # noqa: E501
if not set(verify_status).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `verify_status` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(verify_status) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._verify_status = verify_status
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/bit.py | <reponame>jppdpf/YK-BiT-SDK-Python-1
""" Module of the YooniK BiometricInThings API.
"""
from enum import Enum
from yk_utils.images import parse_image
from yk_utils.apis import request, YoonikApiException
from yk_bit.models import CaptureRequest, VerifyImagesRequest, VerifyRequest, \
CaptureResponse, VerifyResponse, VerifyImagesResponse
class BiTStatus(Enum):
NotAvailable = 0
Available = 1
def capture(capture_timeout: float = 10, anti_spoofing: bool = True,
live_quality_analysis: bool = False) -> CaptureResponse:
""" Provides a live captured frame from the devices camera in base 64 format and its quality metrics.
:param capture_timeout:
Capture timeout in seconds.
:param anti_spoofing:
Activates anti-spoofing detection.
:param live_quality_analysis:
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images.
:return:
The captured image in base 64 format and the capture status.
"""
url = 'bit/capture'
capture_request = CaptureRequest(capture_timeout, anti_spoofing, live_quality_analysis).to_dict()
response = request('POST', url, json=capture_request)
return CaptureResponse.from_dict(response)
def verify(reference_image, capture_time_out: float = 10.0, matching_score_threshold: float = 0.4,
anti_spoofing: bool = True, live_quality_analysis: bool = False,
reference_quality_analysis: bool = False) -> VerifyResponse:
""" Captures a live frame from the camera and cross examines with the reference image.
:param reference_image:
Image can be a string, a file path or a file-like object.
:param reference_quality_analysis:
Activate ISO/ICAO-19794-5 face quality compliance checks on the image.
:param live_quality_analysis:
Activate ISO/ICAO-19794-5 face quality compliance checks on the live face images.
:param anti_spoofing:
Activate anti-spoofing detection.
:param matching_score_threshold:
Defines the minimum acceptable score for a positive match.
:param capture_time_out:
Capture timeout in seconds.
:return:
The frame that was captured to verify against the reference in base 64 format,
its verification status and the matching score.
"""
url = 'bit/verify'
verify_request = VerifyRequest(
reference_image=parse_image(reference_image),
capture_time_out=capture_time_out,
matching_score_threshold=matching_score_threshold,
anti_spoofing=anti_spoofing,
live_quality_analysis=live_quality_analysis,
reference_quality_analysis=reference_quality_analysis,
).to_dict()
response = request('POST', url, json=verify_request)
return VerifyResponse.from_dict(response)
def verify_images(probe_image, reference_image, matching_score_threshold: float = 0.4) \
-> VerifyImagesResponse:
""" Performs face matching between the two provided images.
:param matching_score_threshold:
Defines the minimum acceptable score for a positive match.
:param probe_image:
Image can be a string or a file path or a file-like object.
:param reference_image:
Image can be a string or a file path or a file-like object.
:return:
The matching score and the verified images status.
"""
url = 'bit/verify_images'
verify_images_request = VerifyImagesRequest(
probe_image=parse_image(probe_image),
reference_image=parse_image(reference_image),
matching_score_threshold=matching_score_threshold
).to_dict()
response = request('POST', url, json=verify_images_request)
return VerifyImagesResponse.from_dict(response)
def status() -> BiTStatus:
""" Checks for the camera status. """
url = 'bit/status'
# If the camera is available the request is answered with an empty 200 OK. Otherwise exception is thrown.
try:
request('GET', url)
except YoonikApiException as bit_exception:
if bit_exception.status_code == 503:
return BiTStatus.NotAvailable
raise
return BiTStatus.Available
def setup():
"""
Perform BiT setup actions
"""
url = 'bit/setup'
request('GET', url)
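# Minimal usage sketch (BaseUrl and Key must be set first, as in
# sample/run_bit_sample.py):
# if status() == BiTStatus.Available:
#     cap = capture(capture_timeout=5)
#     print(cap.capture_status)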
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/models/capture_response.py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from yk_utils.models import deserialization
from yk_utils.models import Model
class CaptureResponse(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, image: str=None, capture_status: List[str]=None): # noqa: E501
"""CaptureResponse - a model defined in Swagger
:param image: The image of this CaptureResponse. # noqa: E501
:type image: str
:param capture_status: The capture_status of this CaptureResponse. # noqa: E501
:type capture_status: List[str]
"""
self.swagger_types = {
'image': str,
'capture_status': List[str]
}
self.attribute_map = {
'image': 'image',
'capture_status': 'capture_status'
}
self._image = image
self._capture_status = capture_status
@classmethod
def from_dict(cls, dikt) -> 'CaptureResponse':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The capture_response of this CaptureResponse. # noqa: E501
:rtype: CaptureResponse
"""
return deserialization.deserialize_model(dikt, cls)
@property
def image(self) -> str:
"""Gets the image of this CaptureResponse.
JPG Base 64 string face crop according to ISO/ICAO 19794-5 Token. # noqa: E501
:return: The image of this CaptureResponse.
:rtype: str
"""
return self._image
@image.setter
def image(self, image: str):
"""Sets the image of this CaptureResponse.
JPG Base 64 string face crop according to ISO/ICAO 19794-5 Token. # noqa: E501
:param image: The image of this CaptureResponse.
:type image: str
"""
self._image = image
@property
def capture_status(self) -> List[str]:
"""Gets the capture_status of this CaptureResponse.
Face capture result status # noqa: E501
:return: The capture_status of this CaptureResponse.
:rtype: List[str]
"""
return self._capture_status
@capture_status.setter
def capture_status(self, capture_status: List[str]):
"""Sets the capture_status of this CaptureResponse.
Face capture result status # noqa: E501
:param capture_status: The capture_status of this CaptureResponse.
:type capture_status: List[str]
"""
allowed_values = ["capture_successful", "capture_timedout", "camera_failed", "live_face_detection_failed", "live_face_quality_failed", "liveness_detection_failed"] # noqa: E501
if not set(capture_status).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `capture_status` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(capture_status) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._capture_status = capture_status
|
jppdpf/YK-BiT-SDK-Python-1 | yk_bit/__init__.py | <gh_stars>0
from . import bit
from .utils import Key, BaseUrl
from .bit import capture, verify_images, verify, status, setup, BiTStatus
|
ben8622/rover-control-tutorial | server/classes/Arduino.py | import serial
import time
## This serial port will change depending on your OS and other things
## A good way to find your port is to open the Arduino IDE, click
## Tools -> Port; the entry listed next to your Arduino is the port.
SERIAL_PORT = 'COM3'
BAUD = 9600
class Arduino:
def __init__(self):
super().__init__()
self.arduino = serial.Serial(SERIAL_PORT, BAUD)
print("Arduino connected")
## necessary for the arduino to start up
time.sleep(1)
def send(self, command):
self.arduino.write(command.encode('utf8'))
## Arduino echoes back that the delay is changing
return self.arduino.readline()
def close(self):
## Close the serial connection; ServerComms.close() expects this method
self.arduino.close() |
ben8622/rover-control-tutorial | client/main.py | <reponame>ben8622/rover-control-tutorial
# Client main.py
from classes import ClientComms
from tkinter import *
## Socket Connection Info
HOST = "localhost"
PORT = 5000
## GUI Setup
root = Tk()
root.title("Rover Control Tutorial")
command_entry = Entry(root)
command_entry.pack(padx=5, pady=5)
btn = Button(root, text="Send Command", bg="green", fg="white")
btn.pack(padx=5, pady=5)
def main():
## Connect the socket
comms = ClientComms.ClientComms(HOST, PORT)
print("Comms connected!")
## Assign a callback when the button gets pushed in GUI
btn['command'] = lambda: comms.send(command_entry.get())
root.mainloop()
## Mainloop exits, send quit to server and cleanup
comms.send("quit")
comms.socket.close()
print("Client shutting down.")
if __name__ == "__main__":
main() |
ben8622/rover-control-tutorial | server/main.py | <filename>server/main.py
# Client main.py
from classes import ServerComms
HOST = "localhost"
PORT = 5000
def main():
comms = ServerComms.ServerComms(HOST, PORT)
comms.close()
if __name__ == "__main__":
main() |
ben8622/rover-control-tutorial | client/classes/ClientComms.py | # CLIENTS COMMUNICATIONS
import socket
import threading
class ClientComms:
def __init__(self, HOST, PORT):
self.HOST = HOST
self.PORT = PORT
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.HOST, self.PORT))
def send(self, command):
self.socket.send(command.encode('utf8'))
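# Usage sketch (requires the server from server/main.py listening on the port):
# comms = ClientComms("localhost", 5000)
# comms.send("forward")
# comms.send("quit")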
|
ben8622/rover-control-tutorial | server/classes/ServerComms.py | # SERVER COMMUNICATIONS
import socket
import threading
import time
from .Arduino import Arduino
class ServerComms:
def __init__(self, HOST, PORT):
self.HOST = HOST
self.PORT = PORT
## Connect Arduino
self.arduino = Arduino()
## Create Server Socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((HOST, PORT))
## Wait for client to connect
self.socket.listen(5)
self.conn, self.address = self.socket.accept()
self.receive()
def receive(self):
received = ""
sending = ""
while(True):
received = self.conn.recv(2048)
received = received.decode('utf8')
if(received == "quit"):
print("Serving shutting down.")
self.socket.close()
break
else:
print("SENDING TO ARDUINO: " + received)
response = self.arduino.send(received).decode('utf8')
print("Arduino sent: ", response)
def send(self, command):
self.socket.send(command.encode('utf8'))
def close(self):
self.arduino.close()
self.socket.close()
|
PlantG3/ZmKmerGWAS | kmerPrediction/python_notebooks/Run_DTS_prediction_analysis_parallel.py | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#import needed modules
import os
import pandas as pd
pd.set_option('display.max_rows', 200)
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.gofplots import qqplot
from scipy.stats import boxcox
from sklearn.linear_model import LinearRegression, RidgeCV, Ridge, LassoCV
from datetime import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from scipy.stats import pearsonr
import pickle
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
import time
import random
random.seed(12345)
import mkl
mkl.set_num_threads(1)
#required for keras model
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import backend as K
import gc
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only allocate 1GB of memory on the first GPU
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3900)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e)
# In[ ]:
'''
#explore and/or transform phenotype data
def check_normality(numpy_array):
fig, ax = plt.subplots(1, 2, figsize=(9,4))
ax[0].hist(numpy_array)
qqplot(numpy_array, line='s', ax=ax[1])
plt.show()
def max_sqrt(x):
return np.sqrt(np.max(x+1) - x)
def max_log10(x):
return np.log10(np.max(x+1) - x)
def max_inverse(x):
return 1/(np.max(x+1) - x)
all_phenos = pd.read_csv("../data/maize282_phenotype_summary11oct2021.txt", sep="\t", index_col=[0])
for pheno in all_phenos.columns:
print(pheno)
sng_pheno = all_phenos[pheno].dropna().copy()
# for oil, transform with boxcox
if pheno == "Oil":
sng_pheno = pd.Series(boxcox(sng_pheno)[0], index=sng_pheno.index)
all_phenos[pheno] = sng_pheno
if pheno == "ULA":
sng_pheno = pd.Series(boxcox(max_sqrt(np.sqrt(all_phenos[pheno].dropna())))[0], index=sng_pheno.index)
all_phenos[pheno] = sng_pheno
check_normality(all_phenos[pheno].dropna().copy())
all_phenos.to_csv("../data/maize282_phenotype_transformed11oct2021.txt", sep="\t")
'''
all_phenos = pd.read_csv("../data/maize282_phenotype_transformed11oct2021.txt", sep="\t", index_col=[0])
# In[ ]:
print(str(multiprocessing.cpu_count()) + " CPUs available.")
# In[ ]:
n_cpus_vectorizer = 22
n_cpus_models = 22
print("Using "+str(n_cpus_vectorizer)+ " CPUs for vectorizer.")
print("Using "+str(n_cpus_models)+ " CPUs for models.")
#select which models, vectorizers, and testing schemes
vectorizors = ["CountVectorizer", "TfidfVectorizer"]
models = ["LinearRegression", "Ridge", "Lasso", "NeuralNetwork"]
#models = ["NeuralNetwork"] # run NN part of models not run on cluster
###IN CURRENT SCRIPT splitting can only have one value
splitting = ['Cluster_282_11k']
#splitting = ["Random_Kfolds_11k"]
#phenotype = "Oil"
phenotype = "ULA"
#phenotype = "FT"
#dataSets = ["1M_random_121320_raw_"+phenotype] #for all analyses with randomly chosen k-mers
#dataSets = ["1M_associated_kmeans_102421_FT"]
#dataSets = ["1M_associated_randomFolds_102521_FT"]
#dataSets = ["1M_associated_randomFolds_110121_ULA"]
#dataSets = ["1M_associated_kmeans_110121_ULA"]
#dataSets = ["1M_associated_kmeans_110421_Oil"]
#dataSets = ["1M_associated_randomFolds_110421_Oil"]
include_abandance = [False]
#include_abandance = [True] #, False]
#specify file name for run results to be saved
res_folds = datetime.now().strftime('%m%d%Y_%H%M%S')+"_results_folds.csv" #results from each fold
print(res_folds)
#put selections into table of required runs
run_params = [] #will contain run parameters from above for all runs to be performed
for vec in vectorizors:
for mod in models:
for spl in splitting:
for dat in dataSets:
for inc in include_abandance:
run_params.append([vec,mod,spl,dat,inc])
run_params = pd.DataFrame(run_params, columns=["vectorizor", "model", "splitting", "dataSet", "include_abandance"])
run_params = run_params.sort_values(["dataSet","include_abandance","splitting","vectorizor"]).reset_index(drop=True) #change order for most efficient runs
# In[ ]:
print(run_params)
# In[ ]:
def vectorize(run, train_set, test_set282, test_setNAM):
#vectorize.
if run["vectorizor"] == "CountVectorizer":
#count vectorizer
vectorizer = CountVectorizer()
elif run["vectorizor"] == "TfidfVectorizer":
#TFIDF vectorizor
vectorizer = TfidfVectorizer(min_df = 1 , max_df = 1.0, sublinear_tf=True,use_idf=True)
#setup vectorizors based on all sentences in dataset (for genomic prediction context we would have these)
all_sentences = pd.concat([train_set, test_set282, test_setNAM])
vectorizer.fit(all_sentences["sentence"].values)
#create vectorized training and testing sets
X_train = vectorizer.transform(train_set["sentence"].values)
y_train = train_set["value"]
X_test282 = vectorizer.transform(test_set282["sentence"].values)
y_test282 = test_set282["value"]
if len(test_setNAM)>0:
X_testNAM = vectorizer.transform(test_setNAM["sentence"].values)
y_testNAM = test_setNAM["value"]
    else:
        X_testNAM = pd.DataFrame([])
        y_testNAM = pd.Series([], dtype=float)  # explicit dtype avoids the empty-Series warning
#print(X_train.shape, len(y_train), X_test282.shape, len(y_test282), X_testNAM.shape, len(y_testNAM))
return X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM
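# Note (added for clarity): each "sentence" is a space-joined string of
# lowercase k-mer tokens built by read_data() further down, so CountVectorizer
# yields a (samples x k-mers) count matrix and TfidfVectorizer its TF-IDF
# analogue. Toy illustration with a made-up 4-mer sentence (not real data):
#   "acgt ttga acgt" -> counts {"acgt": 2, "ttga": 1}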
# In[ ]:
def test_regression_continuous_data(run, fold, regression, X_train, y_train, setname):
pred = regression.predict(X_train)
obs = y_train.values
res=pd.DataFrame([y_train.index.tolist(),pred,obs], index=["Taxa","Pred","Obs"]).T
for col in run.index:
#print(col, run.loc[col])
res[col]=run.loc[col]
res["Fold"]=fold
res["Set"] = setname
res = res[['Fold', 'Set', 'vectorizor', 'model', 'splitting', 'dataSet', 'include_abandance', 'Taxa', 'Pred', 'Obs']]
return res
# In[ ]:
def run_eval_regression_reps(LR, run, fold, X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM, reps):
reg_reps=[]
for rep in range(0, reps):
print(rep)
LR.fit(X_train, np.asarray(y_train.values))
#test model on all datasets
res=[]
res.append(test_regression_continuous_data(run, fold, LR, X_train, y_train, setname="Train"))
res.append(test_regression_continuous_data(run, fold, LR, X_test282, y_test282, setname="Test282"))
if len(y_testNAM)>0:
res.append(test_regression_continuous_data(run, fold, LR, X_testNAM, y_testNAM, setname="TestNAM"))
res = pd.concat(res)
res["rep"]=rep
reg_reps.append(res)
reg_reps = pd.concat(reg_reps)
return reg_reps
# In[ ]:
def run_eval_keras_in_replicate(run, fold, X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM, reps):
keras_reps=[]
for rep in range(0, reps):
print(rep)
model, history = run_keras_sngl_model(X_train, y_train)
res=[]
res.append(eval_keras_model(run, fold, model, X_train, y_train, setname="Train"))
res.append(eval_keras_model(run, fold, model, X_test282, y_test282, setname="Test282"))
if len(y_testNAM)>0:
res.append(eval_keras_model(run, fold, model, X_testNAM, y_testNAM, setname="TestNAM"))
res = pd.concat(res)
res["rep"]=rep
keras_reps.append(res)
keras_reps = pd.concat(keras_reps)
return keras_reps
# In[ ]:
def run_analysis(run, fold, X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM):
#run analysis
if run["model"] in ["LinearRegression", "Ridge", "Lasso"]:
#Fit regression
if run["model"]=="LinearRegression":
LR = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)
elif run["model"]=="Ridge":
alphas = [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20]
LR= RidgeCV(alphas, cv=5)
elif run["model"]=="Lasso":
LR = LassoCV(cv=5, n_jobs=1, max_iter=2000) #, random_state=12345
res = run_eval_regression_reps(LR, run, fold, X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM, reps=1)
elif run["model"]=="NeuralNetwork":
#print("Neural Net not yet implemented")
res = run_eval_keras_in_replicate(run, fold, X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM, reps=10)
return res
# In[ ]:
def run_keras_sngl_model(X_train, y_train):
#setup model
K.clear_session()
gc.collect()
input_dim = X_train.shape[1] # Number of features
model = Sequential()
#model.add(layers.Dense(100, input_dim=input_dim, activation='relu'))
#model.add(layers.Dense(50, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(10, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam')
#model.summary()
#train model
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
history = model.fit(X_train.toarray(), y_train.values, epochs=200,
validation_split=0.1,
batch_size=10, callbacks=[callback], verbose=0)
return model, history
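# The network above is deliberately small: a single 10-unit ReLU layer into a
# linear output, trained with MSE/Adam and early-stopped (patience 3) on a 10%
# validation split; the commented-out 100/50-unit layers are alternative widths.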
def eval_keras_model(run, fold, model, X_train, y_train, setname):
pred = model.predict(X_train.toarray(), batch_size=10, verbose=False).flatten()
obs = y_train.values
res=pd.DataFrame([y_train.index.tolist(), pred,obs], index=["Taxa","Pred","Obs"]).T
for col in run.index:
#print(col, run.loc[col])
res[col]=run.loc[col]
res["Fold"]=fold
res["Set"] = setname
res = res[['Fold', 'Set', 'vectorizor', 'model', 'splitting', 'dataSet', 'include_abandance', 'Taxa','Pred', 'Obs']]
return res
# In[ ]:
def import_data_by_fold(fold, fold_sets, with_abnd, phe_file, abnd_file):
#import set data by fold
sets_in_fold = fold_sets[fold_sets["Fold"]==fold].copy()
train_set_cltvrs = sets_in_fold[sets_in_fold["Set"]=="282train"]["Taxa"].tolist()
test_set282_cltvrs = sets_in_fold[sets_in_fold["Set"]=="282test"]["Taxa"].tolist()
print(abnd_file)
    #import abundance and phenotype data
data_set, _, _ = read_data(phe_file,abnd_file,with_abnd)
#create training and testing sets
train_set = data_set.loc[[x for x in train_set_cltvrs if x in data_set.index.tolist()]]
test_set282 = data_set.loc[[x for x in test_set282_cltvrs if x in data_set.index.tolist()]]
#No NAM test set exists for this data
test_setNAM = pd.DataFrame([])
testNAM_uni_tokens = []
print(len(train_set), len(test_set282))
    #sanity check
if len([x for x in train_set.index if x in test_set282.index.tolist()]) != 0:
print("TRAIN and TEST set CONTAMINATION!!!!!")
sys.exit(1)
return train_set, test_set282, test_setNAM
# In[ ]:
def import_data_by_fold_associated_kmers(fold, fold_sets, phe_file, abnd_train, abnd_test, with_abnd=False):
#import set data by fold
sets_in_fold = fold_sets[fold_sets["Fold"]==fold].copy()
train_set_cltvrs = sets_in_fold[sets_in_fold["Set"]=="282train"]["Taxa"].tolist()
test_set282_cltvrs = sets_in_fold[sets_in_fold["Set"]=="282test"]["Taxa"].tolist()
print(abnd_train)
print(abnd_test)
    #import abundance and phenotype data by fold
train_set, train_set_uni_tokens, _ = read_data(phe_file, abnd_file=abnd_train, with_abnd=with_abnd)
train_set = train_set.loc[[x for x in train_set_cltvrs if x in train_set.index.tolist()]]
test_set282, test282_uni_tokens, _ = read_data(phe_file, abnd_file=abnd_test, with_abnd=with_abnd)
test_set282 = test_set282.loc[[x for x in test_set282_cltvrs if x in test_set282.index.tolist()]]
test_setNAM = pd.DataFrame([])
testNAM_uni_tokens = []
#sanity check!
if len([x for x in train_set.index.tolist() if x not in train_set_cltvrs]) != 0:
print("STOP!!! Train set does not match cultivar list.")
sys.exit(1)
    if len([x for x in test_set282.index.tolist() if x not in test_set282_cltvrs]) != 0:
        print("STOP!!! Test set does not match cultivar list.")
        sys.exit(1)
if len([x for x in train_set.index if x in test_set282.index.tolist()]) != 0:
print("TRAIN and TEST set CONTAMINATION!!!!!")
sys.exit(1)
print (len(train_set_cltvrs), len(test_set282_cltvrs))
print(len(train_set), len(test_set282))
return train_set, test_set282, test_setNAM
# In[ ]:
#bring in fold splits
if len(run_params["splitting"].unique()) ==1:
if run_params["splitting"].unique()[0] == 'Random_Kfolds_11k':
fold_sets = pd.read_csv("../data/random_11k_folds_sets_12Dec2020.csv")
elif run_params["splitting"].unique()[0] == 'Cluster_282_11k':
fold_sets = pd.read_csv("../data/kmeans_11k_folds_sets_12Dec2020.csv")
include_folds =(0,len(fold_sets["Fold"].unique()))
for fold in fold_sets["Fold"].unique()[include_folds[0]:include_folds[1]]:
print(fold, len(fold_sets[(fold_sets["Fold"]==fold) & (fold_sets["Set"]=="282train")]),
len(fold_sets[(fold_sets["Fold"]==fold) & (fold_sets["Set"]=="282test")]),
len(fold_sets[(fold_sets["Fold"]==fold) & (fold_sets["Set"]=="NAMtest")]))
# In[ ]:
print(run_params)
# In[ ]:
#assign details for all k-folds, vectorizers, and sets
filt=None
fold_vec_runs = []
for fold in fold_sets["Fold"].unique()[include_folds[0]:include_folds[1]]:
for vect in run_params["vectorizor"].unique():
for aband in run_params["include_abandance"].unique():
#print(fold)
if run_params["dataSet"][0]=="1M_associated_kmeans_102421_FT":
abnd_train = "../data/11fold_kmean_DTS_102421/train_raw/DTS"+str(fold)+"_top1M_kmean_train_raw_KOC.txt"
abnd_test = "../data/11fold_kmean_DTS_102421/test_raw/DTS"+str(fold)+"_top1M_kmean_test_raw_KOC.txt"
elif run_params["dataSet"][0]=="1M_associated_randomFolds_102521_FT":
abnd_train = "../data/11_fold_random_DTS_102521/train_raw/DTS"+str(fold)+"_top1M_random_train_raw_KOC.txt"
abnd_test = "../data/11_fold_random_DTS_102521/test_raw/DTS"+str(fold)+"_top1M_random_test_raw_KOC.txt"
elif run_params["dataSet"][0]=="1M_associated_randomFolds_110121_ULA":
abnd_train = "../data/11_fold_random_ULA_110121/train_raw/ULA"+str(fold)+"_top1M_random_train_raw_KOC.txt"
abnd_test = "../data/11_fold_random_ULA_110121/test_raw/ULA"+str(fold)+"_top1M_random_test_raw_KOC.txt"
elif run_params["dataSet"][0]=="1M_associated_kmeans_110121_ULA":
abnd_train = "../data/11_fold_kmean_ULA_110121/train_raw/ULA"+str(fold)+"_top1M_kmean_train_raw_KOC.txt"
abnd_test = "../data/11_fold_kmean_ULA_110121/test_raw/ULA"+str(fold)+"_top1M_kmean_test_raw_KOC.txt"
elif run_params["dataSet"][0]=="1M_associated_kmeans_110421_Oil":
abnd_train = "../data/11_fold_kmean_oil_110421/train_raw/oil"+str(fold)+"_top1M_kmean_train_raw_KOC.txt"
abnd_test = "../data/11_fold_kmean_oil_110421/test_raw/oil"+str(fold)+"_top1M_kmean_test_raw_KOC.txt"
elif run_params["dataSet"][0]=="1M_associated_randomFolds_110421_Oil":
abnd_train = "../data/11_fold_random_oil_110421/train_raw/oil"+str(fold)+"_top1M_random_train_raw_KOC.txt"
abnd_test = "../data/11_fold_random_oil_110421/test_raw/oil"+str(fold)+"_top1M_random_test_raw_KOC.txt"
elif run_params["dataSet"][0]=="1M_random_121320_raw_"+phenotype:
abnd_train=""
abnd_test=""
                #set k-mer abundance file
abnd_file="../data/maize282.k31.random.1M.KOC.txt"
fold_vec_runs.append([fold, vect, run_params["splitting"].iloc[0], run_params["dataSet"].iloc[0],
aband, abnd_train, abnd_test])
fold_vec_runs = pd.DataFrame(fold_vec_runs, columns = ["Fold", "vectorizor", "splitting","dataSet","include_abandance", "abnd_train", "abnd_test"])
# In[ ]:
print(fold_vec_runs)
print(fold_vec_runs["abnd_train"].str.split("/", expand=True))
print(fold_vec_runs["abnd_test"].str.split("/", expand=True))
# In[ ]:
def read_data(phe_file, abnd_file, with_abnd):
#read in data
if type(phe_file) == pd.core.frame.DataFrame:
kmer_phe = phe_file.copy()
else:
kmer_phe = pd.read_csv(phe_file, sep="\t", index_col=[0]).dropna()
kmer_abnd = pd.read_csv(abnd_file, sep="\t", index_col=[0])
if type(filt)==int:
pval_col = [x for x in kmer_abnd.columns if x.split("_")[-1]=="pvals"]
if len(pval_col)==1:
kmer_abnd = kmer_abnd.sort_values(pval_col[0]) # sort by p-value
kmer_abnd = kmer_abnd.iloc[:filt] #take only the top "filt" number
print("Selecting the top "+str(filt)+"k-mers based on "+pval_col[0])
print("Max and min pvals:", kmer_abnd[pval_col[0]].max(), kmer_abnd[pval_col[0]].min())
print("dataset size:", len(kmer_abnd))
else:
print("Randomly selecting "+str(filt)+" kmers.")
rand_sample = random.sample(kmer_abnd.index.tolist(), filt)
kmer_abnd = kmer_abnd.loc[rand_sample]
#fix column name issues
kmer_abnd.index.name = "id"
kmer_abnd.rename(columns={"B73-1":"B73", "B97-1":"B97"}, inplace=True)
kmer_phe.index.name = "Taxa"
kmer_phe.columns = ["phe"]
kmer_phe = kmer_phe.loc[[x for x in kmer_phe.index.tolist() if x in kmer_abnd.columns.tolist()]]
#Create token list
    #with_abnd=True #include multiple copies of each kmer based on its abundance
token_list=[]
for geno in [x for x in kmer_phe.index.tolist() if x in kmer_abnd.columns.tolist()]:
if with_abnd:
tmp = kmer_abnd[kmer_abnd[geno]>0][geno].reset_index()
#tmp = ((tmp["id"].str.lower()+" ") * tmp[geno]).str.strip()#.str.split(" ")
tmp = ((tmp["id"].str.lower()+" ") * tmp[geno].round().astype(int)).str.strip()#.str.split(" ")
else:
tmp = kmer_abnd[kmer_abnd[geno]>0].index.str.lower().tolist()
token_list.append(" ".join(tmp))
kmer_phe["tokens"]=token_list
unique_tokens = list(set(kmer_abnd.index.tolist()))
kmer_phe.columns = ["value","sentence"]
return kmer_phe, unique_tokens, kmer_abnd
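# Example of the with_abnd switch above (hypothetical k-mer): a k-mer with
# rounded abundance 3 in a genotype contributes "kmer kmer kmer" to that
# genotype's sentence when with_abnd=True, but a single "kmer" when False.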
# In[ ]:
def create_save_vec_sets(fold_vec_run, base):
start = time.time() - base
out_file = "@".join(fold_vec_run[["Fold","vectorizor","splitting","dataSet","include_abandance"]].astype(str).tolist())+".p"
#print(out_file)
if out_file in os.listdir("../data/TMP_vectorizors/"):
print("File already exsist: "+ out_file)
else:
print("Loading Dataset and creating file: "+ out_file)
if (run_params["dataSet"][0]=="1M_random_121320_raw_"+phenotype):
train_set, test_set282, test_setNAM = import_data_by_fold(fold_vec_run["Fold"], fold_sets.copy(), with_abnd=fold_vec_run["include_abandance"],
phe_file = all_phenos[[run_params["dataSet"][0].split("_")[-1]]].dropna(),
abnd_file = abnd_file)
print(abnd_file)
else:
train_set, test_set282, test_setNAM = import_data_by_fold_associated_kmers(fold_vec_run["Fold"], fold_sets.copy(), with_abnd=fold_vec_run["include_abandance"],
phe_file = all_phenos[[run_params["dataSet"][0].split("_")[-1]]].dropna(),
abnd_train = fold_vec_run["abnd_train"],
abnd_test = fold_vec_run["abnd_test"])
print(fold_vec_run["abnd_train"])
print(fold_vec_run["abnd_test"])
print(len(train_set), len(test_set282), len(test_setNAM))
print("Creating new vectorized training and testing sets. "+ out_file)
#vectorize data sets
X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM = vectorize(fold_vec_run, train_set, test_set282, test_setNAM)
print(X_train.shape, len(y_train), X_test282.shape, len(y_test282), X_testNAM.shape, len(y_testNAM), out_file)
print("saving pickle "+ out_file)
with open("../data/TMP_vectorizors/"+out_file, "wb") as out_p:
pickle.dump([X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM], out_p)
stop = time.time() - base
return start, stop, out_file
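# The pickle name is the "@"-joined run parameters, e.g. with the settings
# above a fold labelled 0 would produce:
#   0@CountVectorizer@Cluster_282_11k@1M_associated_kmeans_110121_ULA@False.p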
# In[ ]:
def multiprocess(func, args, workers):
begin_time = time.time()
with ProcessPoolExecutor(max_workers=workers) as executor:
res = executor.map(func, args, [begin_time for i in range(len(args))])
return list(res)
#run all in parallel and save results
args = [fold_vec_runs.loc[x] for x in fold_vec_runs.index]
results = multiprocess(create_save_vec_sets, args, n_cpus_vectorizer)
# In[ ]:
print(run_params)
# In[ ]:
#run each fold_vec_run on its own CPU
def run_save_analysis_sets(fold_vec_run, base):
start = time.time() - base
### IMPORT SETS ###
print("Importing new vectorized training and testing sets.")
#print(fold_vec_run)
in_file = "@".join(fold_vec_run[['Fold','vectorizor', 'splitting', 'dataSet', 'include_abandance']].astype(str).tolist())+".p"
print(in_file)
with open("../data/TMP_vectorizors/"+in_file, "rb") as f:
X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM = pickle.load(f)
#X_train = X_train[:,:100] #for testing
#X_test282 = X_test282[:,:100]
print(X_train.shape, len(y_train), X_test282.shape, len(y_test282), X_testNAM.shape, len(y_testNAM))
### Run each analysis ###
print("Running Analysis.")
#setup run params
run_params_tmp = run_params[(run_params["vectorizor"]==fold_vec_run["vectorizor"]) &
(run_params["splitting"]==fold_vec_run["splitting"]) &
(run_params["dataSet"]==fold_vec_run["dataSet"]) &
(run_params["include_abandance"]==fold_vec_run["include_abandance"])].copy()
for run in run_params_tmp.iterrows():
run = run[1]
if run["model"] == "NeuralNetwork":
print("NeuralNetwork cannot be run in parallel. Will be run in series as end.")
continue
print(run["model"])
run_results = run_analysis(run, fold_vec_run["Fold"], X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM)
run_results["refreshed"]="vectorizor"
#record and save results
        #check if results file exists
if res_folds not in os.listdir("../results"):
run_results.to_csv("../results/"+res_folds, mode="w")
else:
run_results.to_csv("../results/"+res_folds, mode="a")
stop = time.time() - base
return start, stop
# In[ ]:
if len([x for x in run_params["model"].unique() if x != "NeuralNetwork"]) > 1:
print("running non-neural network models in parallel.")
results = multiprocess(run_save_analysis_sets, args, n_cpus_models)
# In[ ]:
if "NeuralNetwork" in run_params["model"].unique():
print("Running Neural Network models in series.")
run_params_NN = run_params[run_params["model"]=="NeuralNetwork"].copy().reset_index(drop=True)
print(run_params_NN)
results_folds=[]
for fold in fold_sets["Fold"].unique()[include_folds[0]:include_folds[1]]:
print(fold)
        results_folds=[] #collect per-run results for this fold
prev_run=pd.Series(index=run_params_NN.loc[0].index, dtype='str')
for run in run_params_NN.iterrows():
refresh=[] #record which data was refreshed
run=run[1]
print(run.tolist())
#import data, and vectorizor
#vectorize training and testing sets
if ((run["dataSet"]== prev_run["dataSet"]) and
(run["include_abandance"]==prev_run["include_abandance"]) and
(run["splitting"]== prev_run["splitting"]) and
(run["vectorizor"]== prev_run["vectorizor"])):
print("Using previous vectorizor.")
else:
print("Importing new vectorized training and testing sets.")
in_file = "@".join([str(fold)]+run[['vectorizor', 'splitting', 'dataSet', 'include_abandance']].astype(str).tolist())+".p"
with open("../data/TMP_vectorizors/"+in_file, "rb") as f:
X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM = pickle.load(f)
print(X_train.shape, len(y_train), X_test282.shape, len(y_test282), X_testNAM.shape, len(y_testNAM))
refresh.append("vectorizor")
#run the analysis
print("Running Analysis.")
run_results = run_analysis(run, fold, X_train, y_train, X_test282, y_test282, X_testNAM, y_testNAM)
run_results["refreshed"]=str(refresh)
#save data
#record and save results
            #check if results file exists
if res_folds not in os.listdir("../results"):
run_results.to_csv("../results/"+res_folds, mode="w")
else:
run_results.to_csv("../results/"+res_folds, mode="a")
results_folds.append(run_results)
prev_run = run.copy()
results_folds = pd.concat(results_folds)
# In[ ]:
|
kids-first/kf-omop-imports | cbttc_proteomics/extract_configs/person.py | <filename>cbttc_proteomics/extract_configs/person.py
# flake8: noqa
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common.concept_schema import OMOP
from common.athena import athena_cache
source_data_url = (
'file://~/Projects/kids_first/data/CBTTC/proteomics/cbttc-proteomics.xlsx'
)
source_data_loading_parameters = {
'sheet_name': 'All Fields - Included - 11_05_2'
}
def ethnicity_map(x):
    value = x.lower()
    if value.startswith('unavailable'):
        value = omop_constants.CONCEPT.COMMON.UNAVAILABLE
    else:
        value = athena_cache.lookup(x, query_params={
            'standardConcept': 'Standard',
            'domain': 'Ethnicity'
        })
    return value
def race_map(x):
    value = x.lower()
    if value.startswith('other'):
        value = omop_constants.CONCEPT.RACE.UNKNOWN
    else:
        value = athena_cache.lookup(x, query_params={
            'standardConcept': 'Standard',
            'domain': 'Race'
        })
    return value
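# Both mappers above short-circuit known placeholder strings and otherwise
# defer to the cached Athena lookup: e.g. any race value starting with "other"
# (case-insensitive) maps to CONCEPT.RACE.UNKNOWN, while other values are
# resolved against the standard Race domain via the API.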
operations = [
# person source value
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.SOURCE_VALUE
),
# person external_id
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.ID
),
# gender source value
keep_map(
in_col="gender",
out_col=OMOP.GENDER.SOURCE_VALUE
),
# gender concept id
value_map(
in_col="gender",
m=lambda x: athena_cache.lookup(x.lower(), query_params={
'standardConcept': 'Standard',
'domain': 'Gender'
}),
out_col=OMOP.GENDER.CONCEPT_ID
),
# gender source concept id (why??)
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.GENDER.SOURCE_CONCEPT_ID
),
# ethnicity source value
keep_map(
in_col="subject_ethnicity",
out_col=OMOP.ETHNICITY.SOURCE_VALUE
),
# ethnicity concept id
value_map(
in_col="subject_ethnicity",
m=lambda x: ethnicity_map(x),
out_col=OMOP.ETHNICITY.CONCEPT_ID
),
# ethnicity source concept id (why??)
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.ETHNICITY.SOURCE_CONCEPT_ID
),
# race source value
keep_map(
in_col='race',
out_col=OMOP.RACE.SOURCE_VALUE
),
# race concept id
value_map(
in_col='race',
m=lambda x: race_map(x),
out_col=OMOP.RACE.CONCEPT_ID
),
# race source concept id (why??)
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.RACE.SOURCE_CONCEPT_ID
),
# year of birth
constant_map(
m=0,
out_col=OMOP.MEASUREMENT.YEAR_OF_BIRTH
)
]
|
kids-first/kf-omop-imports | cbttc_proteomics/extract_configs/radiation.py | # flake8: noqa
import datetime
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map,
row_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common import athena
from common.concept_schema import OMOP
from common.athena import athena_cache
source_data_url = (
'file://~/Projects/kids_first/data/CBTTC/proteomics/cbttc-proteomics.xlsx'
)
def post_load(df):
return df[df['radiation'] == 'Yes']
source_data_loading_parameters = {
'sheet_name': 'All Fields - Included - 11_05_2',
'do_after_load': post_load
}
def procedure_occur_id(row):
external_id = ''
components = ['research_id', 'radiation_type',
'start_age_rad', 'stop_age_rad']
external_id = '-'.join([f'{col}:{str(row[col])}' for col in components])
return external_id
def procedure(x):
if 'unavailable' in x.lower():
return omop_constants.CONCEPT.COMMON.UNAVAILABLE
else:
return athena_cache.lookup(x.lower().rstrip('s') + ' radiation')
def radiation_start(row):
x = row['start_age_rad'].lower()
if 'unavailable' not in x:
value = str(datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(x) - 1))
else:
value = str(
datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(row['age_of_initial_diagnosis']) - 1))
return value
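# Ages here are day counts treated as offsets from the epoch (see the
# "consider epoch to be person's year of birth" note below), so e.g. an age of
# 366.0 days lands at roughly 1971-01-01; the exact instant is timezone-local
# because datetime.fromtimestamp(0) is.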
operations = [
# observation external_id
row_map(
m=lambda row: procedure_occur_id(row),
out_col=OMOP.PROCEDURE.ID
),
# person external_id
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.ID
),
# procedure source value
row_map(
m=lambda row: (row['radiation_type'] + ' radiation at '
+ row['rad_site']),
out_col=OMOP.PROCEDURE.SOURCE_VALUE
),
# procedure concept id
value_map(
in_col='radiation_type',
m=lambda x: procedure(x),
out_col=OMOP.PROCEDURE.CONCEPT_ID
),
constant_map(
m=athena_cache.lookup('radiation',
query_params={'standardConcept': 'Standard'}),
out_col=OMOP.PROCEDURE.TYPE.CONCEPT_ID
),
# procedure_start_datetime
# consider epoch to be person's year of birth
row_map(
m=lambda row: radiation_start(row),
out_col=OMOP.PROCEDURE.DATETIME
),
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.PROCEDURE.MODIFIER.CONCEPT_ID
),
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.PROCEDURE.SOURCE_CONCEPT_ID
)
]
|
kids-first/kf-omop-imports | common/load.py | """
OMOP Loader
"""
import os
import logging
from pprint import pformat
from copy import deepcopy
from kf_lib_data_ingest.common.misc import read_json, write_json
from kf_model_omop.factory import scoped_session
from kf_model_omop.model import models
from common.target_api_config import schema
logger = logging.getLogger(__name__)
id_model_map = {
list(model_schema['_primary_key'].keys())[0]: model_name
for model_name, model_schema in schema.items()
if model_schema['_primary_key']
}
def _resolve_links(params, id_cache):
"""
Look up the value of foreign key properties in the id_cache by the source
    ID or the value used to uniquely identify instances of the linked model in
    the source data
    :param params: the dict of properties and values needed to create an
    instance of a model
:param id_cache: a dict storing the mapping of source IDs to primary keys
"""
key = '_links'
for property, value in params[key].items():
foreign_model = id_model_map.get(property)
id_fk_map = id_cache.get(foreign_model)
if id_fk_map:
params[key][property] = id_fk_map.get(value, value)
params.update(params.pop('_links', None))
return params
def _resolve_primary_key(model_name, params, id_cache):
"""
Look up the value of the primary key in the id_cache by its source ID
or the value used to uniquely identify instances of model_name in the
source data
:param model_name: the SQLAlchemy model class name
:param params: the dict of properties and values needed to create an
instance of model
:param id_cache: a dict storing the mapping of source IDs to primary keys
"""
for k, v in params['_primary_key'].items():
primary_key = k
source_id = str(v)
primary_key_value = id_cache[model_name].get(source_id)
params.pop('_primary_key', None)
return primary_key, source_id, primary_key_value,
def _fill_values(target_schema, row):
"""
Helper for load. Fills values of properties in the target schema given
values from a row in the mapped dataframe
"""
params = deepcopy(target_schema)
for key, value in params.items():
if key not in {'_links', '_primary_key'}:
params[key] = row.get(value)
for key in {'_links', '_primary_key'}:
if params.get(key) is None:
continue
for property, value in params[key].items():
params[key][property] = row.get(value)
return params
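# Shape of the id_cache threaded through this module (illustrative values):
#   {"Person": {"<source person id>": <generated person_id>, ...},
#    "Speciman": {...}, ...}
# i.e. id_cache[model_name][source_id] -> primary key assigned at load time.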
def load(session, df_dict, id_cache, include_set):
"""
Create instances of SQLAlchemy models populated with data from df_dict
and update or insert them in the OMOP database
:param session: the current db session
:param df_dict: dict of dataframes keyed by model_name
:param id_cache: dict storing source ID to primary key mapping
"""
for model_cls_name, df in df_dict.items():
if (include_set is not None) and model_cls_name not in include_set:
logging.info(f'Skipping loading of {model_cls_name}')
continue
# Lookup model class by name
model_cls = getattr(models, model_cls_name)
# Init id cache if needed
if model_cls_name not in id_cache:
id_cache[model_cls_name] = {}
total = df.shape[0]
logger.info(f'Loading {total} {model_cls_name} instances ...')
# Get schema for target model
target_schema = deepcopy(schema.get(model_cls_name))
# Make model instances
for i, row in df.iterrows():
# Fill property values
params = _fill_values(target_schema, row)
# Translate values of link properties to db IDs
result = _resolve_links(params, id_cache)
(primary_key,
source_id,
primary_key_value) = _resolve_primary_key(model_cls_name,
result,
id_cache)
logger.debug(f'\tAttempt load {i} of {total} {model_cls_name}: '
f'\n{pformat(result)}')
logger.debug(f'{source_id} has pk {primary_key} = {primary_key_value}')
# Create or update model instance
instance = None
if primary_key_value:
instance = session.query(model_cls).get(primary_key_value)
if instance:
operation = 'Updated'
for property, value in result.items():
setattr(instance, property, value)
session.flush()
else:
operation = 'Created'
# Reuse previously generated primary keys
if primary_key_value:
result[primary_key] = primary_key_value
instance = model_cls(**result)
session.add(instance)
session.flush()
# Update id cache
primary_key_value = getattr(instance, primary_key)
id_cache[model_cls_name][source_id] = primary_key_value
logger.info(f'\t{operation} {i} of {total} {model_cls_name}: '
f'\n{pformat(result)}')
session.commit()
def run(df_dict, id_cache_filepath, include_set=None):
"""
Entry point into the loader
"""
logger.info('BEGIN LoadStage ...')
# Read id cache
id_cache = {}
if os.path.isfile(id_cache_filepath):
id_cache = read_json(id_cache_filepath)
# Use the context managed session to interact with DB
with scoped_session() as session:
load(session, df_dict, id_cache, include_set)
# Write id cache
write_json(id_cache, id_cache_filepath)
logger.info('END LoadStage ...')
|
kids-first/kf-omop-imports | cbttc_proteomics/transform.py | import pandas as pd
def transform(dfs):
# Person
persons = dfs['person']
# Specimens
specimens = dfs['specimen']
# Diagnoses - condition occurrence
diagnoses = dfs['diagnosis']
# Outcomes
outcomes = dfs['outcome']
# Radiations
radiations = dfs['radiation']
# Chemotherapies
chemos = dfs['chemotherapy']
procedure_occurs = pd.concat([radiations, chemos], ignore_index=True)
df_out = {
'Person': persons,
'ConditionOccurrence': diagnoses,
        'Speciman': specimens,  # (sic) the kf_model_omop model class is spelled 'Speciman'
'Observation': outcomes,
'ProcedureOccurrence': procedure_occurs
}
return df_out
|
kids-first/kf-omop-imports | villain_2015/extract_configs/NICHD_GMKF_DSD.py | # flake8: noqa
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common.concept_schema import OMOP
source_data_url = (
'file://~/Projects/kids_first/data/Vilain_2015/dbgap/NICHD_GMKF_DSD.xlsx'
)
source_data_loading_parameters = {}
def gender_map(x):
m = {
"female": omop_constants.CONCEPT.GENDER.FEMALE,
"male": omop_constants.CONCEPT.GENDER.MALE,
"default": omop_constants.CONCEPT.GENDER.UNKNOWN
}
return m.get(x, m.get('default'))
operations = [
# person source value
keep_map(
in_col='submitted_subject_id_s',
out_col=OMOP.PERSON.SOURCE_VALUE
),
# person external_id
keep_map(
in_col='submitted_subject_id_s',
out_col=OMOP.PERSON.ID
),
# specimen source value
keep_map(
in_col='biospecimen_repository_sample_id_s',
out_col=OMOP.SPECIMEN.ID
),
# specimen datetime
keep_map(
in_col='LoadDate_s',
out_col=OMOP.SPECIMEN.DATETIME
),
# gender source value
value_map(
in_col="sex_s",
m={
"female": kf_constants.GENDER.FEMALE,
"male": kf_constants.GENDER.MALE
},
out_col=OMOP.GENDER.SOURCE_VALUE
),
# gender concept id
value_map(
in_col="sex_s",
m=lambda x: gender_map(x),
out_col=OMOP.GENDER.CONCEPT_ID
),
# gender source concept id (why??)
constant_map(
m=omop_constants.CONCEPT.COMMON.NO_MATCH,
out_col=OMOP.GENDER.SOURCE_CONCEPT_ID
),
# ethnicity source value
constant_map(
m=kf_constants.COMMON.NOT_REPORTED,
out_col=OMOP.ETHNICITY.SOURCE_VALUE
),
# ethnicity concept id
constant_map(
m=omop_constants.CONCEPT.ETHNICITY.UNKNOWN,
out_col=OMOP.ETHNICITY.CONCEPT_ID
),
# ethnicity source concept id (why??)
constant_map(
m=omop_constants.CONCEPT.COMMON.NO_MATCH,
out_col=OMOP.ETHNICITY.SOURCE_CONCEPT_ID
),
# race source value
constant_map(
m=kf_constants.COMMON.NOT_REPORTED,
out_col=OMOP.RACE.SOURCE_VALUE
),
# race concept id
constant_map(
m=omop_constants.CONCEPT.RACE.UNKNOWN,
out_col=OMOP.RACE.CONCEPT_ID
),
# race source concept id (why??)
constant_map(
m=omop_constants.CONCEPT.COMMON.NO_MATCH,
out_col=OMOP.RACE.SOURCE_CONCEPT_ID
),
# year of birth
constant_map(
m=0,
out_col=OMOP.MEASUREMENT.YEAR_OF_BIRTH
)
]
|
kids-first/kf-omop-imports | common/target_api_config.py | <gh_stars>0
# flake8: noqa
"""
OMOP Target Model config
"""
from common.concept_schema import OMOP
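# Each top-level key names a kf_model_omop SQLAlchemy model. "_links" maps
# foreign-key columns to mapped-dataframe column names (OMOP concept-schema
# attributes, or None when unused), "_primary_key" maps the primary-key column
# to its source-ID column, and the remaining keys are ordinary columns; the
# loader in common/load.py consumes this structure.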
schema = {
"CareSite": {
"_links": {
"location_id": None,
"place_of_service_concept_id": None
},
"_primary_key": {
"care_site_id": None
},
"care_site_name": None,
"care_site_source_value": None,
"place_of_service_source_value": None
},
"Concept": {
"_links": {
"concept_class_id": None,
"domain_id": None,
"vocabulary_id": None
},
"_primary_key": {
"concept_id": None
},
"concept_code": None,
"concept_name": None,
"invalid_reason": None,
"standard_concept": None,
"valid_end_date": None,
"valid_start_date": None
},
"ConceptAncestor": {
"_links": {
"ancestor_concept_id": None,
"descendant_concept_id": None
},
"_primary_key": {},
"max_levels_of_separation": None,
"min_levels_of_separation": None
},
"ConceptClass": {
"_links": {
"concept_class_concept_id": None
},
"_primary_key": {
"concept_class_id": None
},
"concept_class_name": None
},
"ConceptRelationship": {
"_links": {
"concept_id_1": None,
"concept_id_2": None,
"relationship_id": None
},
"_primary_key": {},
"invalid_reason": None,
"valid_end_date": None,
"valid_start_date": None
},
"ConditionEra": {
"_links": {
"condition_concept_id": None,
"person_id": None
},
"_primary_key": {
"condition_era_id": None
},
"condition_era_end_datetime": None,
"condition_era_start_datetime": None,
"condition_occurrence_count": None
},
"ConditionOccurrence": {
"_links": {
"condition_concept_id": OMOP.CONDITION.CONCEPT_ID,
"condition_source_concept_id": OMOP.CONDITION.SOURCE_CONCEPT_ID,
"condition_status_concept_id": OMOP.CONDITION.STATUS.CONCEPT_ID,
"condition_type_concept_id": OMOP.CONDITION.TYPE.CONCEPT_ID,
"person_id": OMOP.PERSON.ID,
"provider_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"condition_occurrence_id": OMOP.CONDITION.ID
},
"condition_end_date": None,
"condition_end_datetime": None,
"condition_source_value": OMOP.CONDITION.SOURCE_VALUE,
"condition_start_date": None,
"condition_start_datetime": OMOP.CONDITION.DATETIME,
"condition_status_source_value": OMOP.CONDITION.STATUS.SOURCE_VALUE,
"stop_reason": None
},
"Cost": {
"_links": {
"cost_concept_id": None,
"cost_source_concept_id": None,
"cost_type_concept_id": None,
"currency_concept_id": None,
"drg_concept_id": None,
"payer_plan_period_id": None,
"person_id": None,
"revenue_code_concept_id": None
},
"_primary_key": {
"cost_id": None
},
"billed_date": None,
"cost": None,
"cost_event_field_concept_id": None,
"cost_event_id": None,
"cost_source_value": None,
"drg_source_value": None,
"incurred_date": None,
"paid_date": None,
"revenue_code_source_value": None
},
"DeviceExposure": {
"_links": {
"device_concept_id": None,
"device_source_concept_id": None,
"device_type_concept_id": None,
"person_id": None,
"provider_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"device_exposure_id": None
},
"device_exposure_end_date": None,
"device_exposure_end_datetime": None,
"device_exposure_start_date": None,
"device_exposure_start_datetime": None,
"device_source_value": None,
"quantity": None,
"unique_device_id": None
},
"Domain": {
"_links": {
"domain_concept_id": None
},
"_primary_key": {
"domain_id": None
},
"domain_name": None
},
"DoseEra": {
"_links": {
"drug_concept_id": None,
"person_id": None,
"unit_concept_id": None
},
"_primary_key": {
"dose_era_id": None
},
"dose_era_end_datetime": None,
"dose_era_start_datetime": None,
"dose_value": None
},
"DrugEra": {
"_links": {
"drug_concept_id": None,
"person_id": None
},
"_primary_key": {
"drug_era_id": None
},
"drug_era_end_datetime": None,
"drug_era_start_datetime": None,
"drug_exposure_count": None,
"gap_days": None
},
"DrugExposure": {
"_links": {
"drug_concept_id": None,
"drug_source_concept_id": None,
"drug_type_concept_id": None,
"person_id": None,
"provider_id": None,
"route_concept_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"drug_exposure_id": None
},
"days_supply": None,
"dose_unit_source_value": None,
"drug_exposure_end_date": None,
"drug_exposure_end_datetime": None,
"drug_exposure_start_date": None,
"drug_exposure_start_datetime": None,
"drug_source_value": None,
"lot_number": None,
"quantity": None,
"refills": None,
"route_source_value": None,
"sig": None,
"stop_reason": None,
"verbatim_end_date": None
},
"DrugStrength": {
"_links": {
"amount_unit_concept_id": None,
"denominator_unit_concept_id": None,
"drug_concept_id": None,
"ingredient_concept_id": None,
"numerator_unit_concept_id": None
},
"_primary_key": {},
"amount_value": None,
"box_size": None,
"denominator_value": None,
"invalid_reason": None,
"numerator_value": None,
"valid_end_date": None,
"valid_start_date": None
},
"Location": {
"_links": {},
"_primary_key": {
"location_id": None
},
"address_1": None,
"address_2": None,
"city": None,
"country": None,
"county": None,
"latitude": None,
"location_source_value": None,
"longitude": None,
"state": None,
"zip": None
},
"LocationHistory": {
"_links": {
"location_id": None,
"relationship_type_concept_id": None
},
"_primary_key": {
"location_history_id": None
},
"domain_id": None,
"end_date": None,
"entity_id": None,
"start_date": None
},
"Measurement": {
"_links": {
"measurement_concept_id": None,
"measurement_source_concept_id": None,
"measurement_type_concept_id": None,
"operator_concept_id": None,
"person_id": None,
"provider_id": None,
"unit_concept_id": None,
"value_as_concept_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"measurement_id": None
},
"measurement_date": None,
"measurement_datetime": None,
"measurement_source_value": None,
"measurement_time": None,
"range_high": None,
"range_low": None,
"unit_source_value": None,
"value_as_number": None,
"value_source_value": None
},
"Note": {
"_links": {
"encoding_concept_id": None,
"language_concept_id": None,
"note_class_concept_id": None,
"note_type_concept_id": None,
"person_id": None,
"provider_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"note_id": None
},
"note_date": None,
"note_datetime": None,
"note_event_field_concept_id": None,
"note_event_id": None,
"note_source_value": None,
"note_text": None,
"note_title": None
},
"NoteNlp": {
"_links": {
"note_id": None,
"note_nlp_concept_id": None,
"note_nlp_source_concept_id": None,
"section_concept_id": None
},
"_primary_key": {
"note_nlp_id": None
},
"lexical_variant": None,
"nlp_date": None,
"nlp_datetime": None,
"nlp_system": None,
"offset": None,
"snippet": None,
"term_exists": None,
"term_modifiers": None,
"term_temporal": None
},
"Observation": {
"_links": {
"observation_concept_id": OMOP.OBSERVATION.CONCEPT_ID,
"observation_source_concept_id": OMOP.OBSERVATION.SOURCE_CONCEPT_ID,
"observation_type_concept_id": OMOP.OBSERVATION.TYPE.CONCEPT_ID,
"person_id": OMOP.PERSON.ID,
"provider_id": None,
"qualifier_concept_id": None,
"unit_concept_id": None,
"value_as_concept_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None,
"obs_event_field_concept_id": OMOP.OBSERVATION.EVENT_FIELD.CONCEPT_ID,
},
"_primary_key": {
"observation_id": OMOP.OBSERVATION.ID
},
"observation_date": None,
"observation_datetime": OMOP.OBSERVATION.DATETIME,
"observation_event_id": None,
"observation_source_value": OMOP.OBSERVATION.SOURCE_VALUE,
"qualifier_source_value": None,
"unit_source_value": None,
"value_as_datetime": None,
"value_as_number": None,
"value_as_string": None
},
"ObservationPeriod": {
"_links": {
"period_type_concept_id": None,
"person_id": None
},
"_primary_key": {
"observation_period_id": None
},
"observation_period_end_date": None,
"observation_period_start_date": None
},
"PayerPlanPeriod": {
"_links": {
"contract_concept_id": None,
"contract_person_id": None,
"contract_source_concept_id": None,
"payer_concept_id": None,
"payer_source_concept_id": None,
"person_id": None,
"plan_concept_id": None,
"plan_source_concept_id": None,
"sponsor_concept_id": None,
"sponsor_source_concept_id": None,
"stop_reason_concept_id": None,
"stop_reason_source_concept_id": None
},
"_primary_key": {
"payer_plan_period_id": None
},
"contract_source_value": None,
"family_source_value": None,
"payer_plan_period_end_date": None,
"payer_plan_period_start_date": None,
"payer_source_value": None,
"plan_source_value": None,
"sponsor_source_value": None,
"stop_reason_source_value": None
},
"Person": {
"_links": {
"care_site_id": None,
"ethnicity_concept_id": OMOP.ETHNICITY.CONCEPT_ID,
"ethnicity_source_concept_id": OMOP.ETHNICITY.SOURCE_CONCEPT_ID,
"gender_concept_id": OMOP.GENDER.CONCEPT_ID,
"gender_source_concept_id": OMOP.GENDER.SOURCE_CONCEPT_ID,
"location_id": None,
"provider_id": None,
"race_concept_id": OMOP.RACE.CONCEPT_ID,
"race_source_concept_id": OMOP.RACE.SOURCE_CONCEPT_ID
},
"_primary_key": {
"person_id": OMOP.PERSON.ID
},
"birth_datetime": None,
"day_of_birth": None,
"death_datetime": None,
"ethnicity_source_value": OMOP.ETHNICITY.SOURCE_VALUE,
"gender_source_value": OMOP.GENDER.SOURCE_VALUE,
"month_of_birth": None,
"person_source_value": OMOP.PERSON.SOURCE_VALUE,
"race_source_value": OMOP.RACE.SOURCE_VALUE,
"year_of_birth": OMOP.MEASUREMENT.YEAR_OF_BIRTH
},
"ProcedureOccurrence": {
"_links": {
"modifier_concept_id": OMOP.PROCEDURE.MODIFIER.CONCEPT_ID,
"person_id": OMOP.PERSON.ID,
"procedure_concept_id": OMOP.PROCEDURE.CONCEPT_ID,
"procedure_source_concept_id": OMOP.PROCEDURE.SOURCE_CONCEPT_ID,
"procedure_type_concept_id": OMOP.PROCEDURE.TYPE.CONCEPT_ID,
"provider_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"procedure_occurrence_id": OMOP.PROCEDURE.ID
},
"modifier_source_value": None,
"procedure_date": None,
"procedure_datetime": OMOP.PROCEDURE.DATETIME,
"procedure_source_value": None,
"quantity": None
},
"Provider": {
"_links": {
"care_site_id": None,
"gender_concept_id": None,
"gender_source_concept_id": None,
"specialty_concept_id": None,
"specialty_source_concept_id": None
},
"_primary_key": {
"provider_id": None
},
"dea": None,
"gender_source_value": None,
"npi": None,
"provider_name": None,
"provider_source_value": None,
"specialty_source_value": None,
"year_of_birth": None
},
"Relationship": {
"_links": {
"relationship_concept_id": None,
"reverse_relationship_id": None
},
"_primary_key": {
"relationship_id": None
},
"defines_ancestry": None,
"is_hierarchical": None,
"relationship_name": None
},
"SourceToConceptMap": {
"_links": {
"source_vocabulary_id": None,
"target_concept_id": None,
"target_vocabulary_id": None
},
"_primary_key": {
"source_code": None,
"valid_end_date": None
},
"invalid_reason": None,
"source_code_description": None,
"source_concept_id": None,
"valid_start_date": None
},
"Speciman": {
"_links": {
"person_id": OMOP.PERSON.ID,
"specimen_concept_id": OMOP.SPECIMEN.CONCEPT_ID,
"anatomic_site_concept_id": OMOP.SPECIMEN.ANATOMIC_SITE.CONCEPT_ID,
"disease_status_concept_id": OMOP.SPECIMEN.DISEASE_STATUS.CONCEPT_ID,
"specimen_type_concept_id": OMOP.SPECIMEN.TYPE.CONCEPT_ID,
"unit_concept_id": None,
},
"_primary_key": {
"specimen_id": OMOP.SPECIMEN.ID
},
"anatomic_site_source_value": OMOP.SPECIMEN.ANATOMIC_SITE.SOURCE_VALUE,
"disease_status_source_value": OMOP.SPECIMEN.DISEASE_STATUS.SOURCE_VALUE,
"quantity": None,
"specimen_date": None,
"specimen_datetime": OMOP.SPECIMEN.DATETIME,
"specimen_source_id": OMOP.SPECIMEN.ID,
"specimen_source_value": OMOP.SPECIMEN.SOURCE_VALUE,
"unit_source_value": None
},
"SurveyConduct": {
"_links": {
"assisted_concept_id": None,
"collection_method_concept_id": None,
"person_id": None,
"provider_id": None,
"respondent_type_concept_id": None,
"response_visit_occurrence_id": None,
"survey_concept_id": None,
"survey_source_concept_id": None,
"timing_concept_id": None,
"validated_survey_concept_id": None,
"visit_detail_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"survey_conduct_id": None
},
"assisted_source_value": None,
"collection_method_source_value": None,
"respondent_type_source_value": None,
"survey_end_date": None,
"survey_end_datetime": None,
"survey_source_identifier": None,
"survey_source_value": None,
"survey_start_date": None,
"survey_start_datetime": None,
"survey_version_number": None,
"timing_source_value": None,
"validated_survey_source_value": None
},
"VisitDetail": {
"_links": {
"admitted_from_concept_id": None,
"care_site_id": None,
"discharge_to_concept_id": None,
"person_id": None,
"preceding_visit_detail_id": None,
"provider_id": None,
"visit_detail_concept_id": None,
"visit_detail_parent_id": None,
"visit_detail_source_concept_id": None,
"visit_detail_type_concept_id": None,
"visit_occurrence_id": None
},
"_primary_key": {
"visit_detail_id": None
},
"admitted_from_source_value": None,
"discharge_to_source_value": None,
"visit_detail_end_date": None,
"visit_detail_end_datetime": None,
"visit_detail_source_value": None,
"visit_detail_start_date": None,
"visit_detail_start_datetime": None
},
"VisitOccurrence": {
"_links": {
"admitted_from_concept_id": None,
"care_site_id": None,
"discharge_to_concept_id": None,
"person_id": None,
"preceding_visit_occurrence_id": None,
"provider_id": None,
"visit_concept_id": None,
"visit_source_concept_id": None,
"visit_type_concept_id": None
},
"_primary_key": {
"visit_occurrence_id": None
},
"admitted_from_source_value": None,
"discharge_to_source_value": None,
"visit_end_date": None,
"visit_end_datetime": None,
"visit_source_value": None,
"visit_start_date": None,
"visit_start_datetime": None
},
"Vocabulary": {
"_links": {
"vocabulary_concept_id": None
},
"_primary_key": {
"vocabulary_id": None
},
"vocabulary_name": None,
"vocabulary_reference": None,
"vocabulary_version": None
}
}
|
kids-first/kf-omop-imports | common/constants.py | """
OMOP Constants
"""
class CONCEPT:
class DIAGNOSIS:
PRELIMINARY = 3046688
RECURRENCE = 4082492
PROGRESSIVE = 4114504
class SPECIMEN:
class ANATOMIC_SITE:
BRAIN_STEM_PART = 4001043
class SPINAL_CORD:
THORACIC = 4176257
CERVICAL = 4183411
LUMBAR = 4244556
class COMPOSITION:
BLOOD = 4001225
BRAIN_TISSUE = 4134448
class ANALYTE:
RNA = 4103715
DNA = 0
class TYPE:
TUMOR = 4122248
class OUTCOME:
class VITAL_STATUS:
DECEASED = 9176
class DISEASE_STATUS:
ABNORMAL = 4135493
NORMAL = 4069590
class COMMON:
NO_MATCH = 0
UNAVAILABLE = 45884388
class GENDER:
FEMALE = 8532
MALE = 8507
UNKNOWN = 8551
class ETHNICITY:
HISPANIC = 38003563
NOT_HISPANIC = 38003564
UNKNOWN = 759814
class RACE:
WHITE = 8527
AFRICAN = 8516
ASIAN = 8515
PACIFIC_ISLANDER = 8557
AMERICAN_INDIAN = 8657
UNKNOWN = 8552
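# These integers are OMOP standard concept IDs (e.g. CONCEPT.GENDER.FEMALE is
# concept_id 8532 in the OMOP vocabulary); extract configs use them directly
# when no Athena lookup is needed.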
|
kids-first/kf-omop-imports | common/athena.py | import os
import logging
import requests
import pandas as pd
from urllib.parse import urlencode
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from kf_lib_data_ingest.common.misc import (
read_json,
write_json
)
from common.constants import CONCEPT
API_URL = 'http://athena.ohdsi.org/api/v1/concepts'
ATHENA_CACHE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'athena_cache.json')
logger = logging.getLogger(__name__)
class AthenaCache(object):
def __init__(self):
self.athena_cache = {}
if os.path.isfile(ATHENA_CACHE_FILE):
self.athena_cache = read_json(ATHENA_CACHE_FILE)
def lookup(self, source_value, use_cache=True, query_params={}):
"""
Lookup a concept by name via the Athena standard vocabulary API. First
check local term cache stored in a JSON file.
"""
concept = None
logger.debug(f'Looking up standard concept for {source_value} ...')
# Check local cache first
if (use_cache and
self.athena_cache and
(source_value in self.athena_cache)):
concept = self.athena_cache.get(source_value)
logger.debug(f'Found concept_id {concept} for {source_value}'
' in local cache')
# Lookup term via Athena API
else:
query_params.update({'query': source_value.lower()})
query_string = urlencode(query_params)
response = requests.get(f'{API_URL}?{query_string}')
if response.status_code == 200:
result = response.json()
# Apply fuzzy string match and filters
athena_results = result['content']
if athena_results:
concept = self._choose_best(source_value, athena_results)
logger.debug(
f'Found concept_id {concept} for {source_value}'
f' via Athena API {API_URL}')
            else:
                logger.info(response.text)  # Response.text is a property, not a method
if concept:
concept['id'] = int(concept['id'])
concept_id = concept['id']
self.athena_cache[source_value] = concept
else:
concept_id = CONCEPT.COMMON.NO_MATCH
logger.info(
f'Could not find standard concept for {source_value}'
f' via Athena API {API_URL}')
return int(concept_id)
def write_cache(self):
"""
Write the cache which stores the mappings of source value to standard
concepts to a JSON file
"""
if self.athena_cache:
write_json(self.athena_cache, ATHENA_CACHE_FILE)
def _choose_best(self, source_value, athena_results,
use_standard_concept=False):
"""
Apply fuzzy text search to Athena standard concept results. Athena API
seems to use ILIKE and not fuzzy text string matching when searching
for standard concepts that match the search term.
Apply additional optional filters (i.e domain == Condition). Filters
are ANDed together.
:param athena_results: list of dicts containing standard concepts
returned by Athena concept search API
"""
concept = athena_results[0]
# Fuzzy text search
fuzzy_results = process.extract(source_value, athena_results,
scorer=fuzz.token_sort_ratio)
fuzzy_results_df = pd.DataFrame([r[0] for r in fuzzy_results])
# Apply filters
filtered_df = fuzzy_results_df[
fuzzy_results_df['invalidReason'] == 'Valid']
if use_standard_concept:
filters = [('standardConcept', 'Standard')]
for f in filters:
if not filtered_df.empty:
filtered_df = filtered_df[filtered_df[f[0]] == f[1]]
# Choose best result
if not filtered_df.empty:
concept = filtered_df.iloc[0].to_dict()
elif not fuzzy_results_df.empty:
concept = fuzzy_results_df.iloc[0].to_dict()
return concept
athena_cache = AthenaCache()
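# Usage sketch (hypothetical source value; hits the public Athena API on a
# cache miss):
#   concept_id = athena_cache.lookup('female', query_params={'domain': 'Gender'})
#   # -> an int concept id, or CONCEPT.COMMON.NO_MATCH (0) if nothing matched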
|
kids-first/kf-omop-imports | common/etl.py | import os
from common import extract
from common import transform
from common import load
def run(study_dir, output_dir, transform_func):
include_set = None
# Extract stage
config_dir = os.path.join(study_dir, 'extract_configs')
extract_configs = [os.path.join(config_dir, fname)
for fname in os.listdir(config_dir)
if os.path.isfile(os.path.join(config_dir, fname))]
data_dict = extract.run(output_dir, extract_configs)
# Transform
df_dict = transform.run(output_dir, data_dict, transform_func)
# Load
id_cache_file = os.path.join(study_dir, 'id_cache.json')
load.run(df_dict, id_cache_file, include_set=include_set)
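# Invocation sketch (paths are placeholders): given a study directory holding
# an extract_configs/ folder and an id_cache.json, something like
#   from cbttc_proteomics.transform import transform
#   run('cbttc_proteomics', 'output', transform)
# drives extract -> transform -> load end to end.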
|
kids-first/kf-omop-imports | villain_2015/extract_configs/3a_sample_attributes.py | # flake8: noqa
from pandas import read_csv
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common.concept_schema import OMOP
source_data_url = (
'file://~/Projects/kids_first/data/Vilain_2015/dbgap/3a_dbGaP_SampleAttributesDSproofed.txt'
)
source_data_loading_parameters = {
'load_func': read_csv,
'sep': '\t'
}
def body_site_map(x):
m = {
"B": omop_constants.CONCEPT.SPECIMEN.COMPOSITION.BLOOD,
"FTL": omop_constants.CONCEPT.COMMON.NO_MATCH,
"FTU": omop_constants.CONCEPT.COMMON.NO_MATCH,
"S": omop_constants.CONCEPT.COMMON.NO_MATCH,
"TP": omop_constants.CONCEPT.COMMON.NO_MATCH,
"U": omop_constants.CONCEPT.COMMON.NO_MATCH,
"default": omop_constants.CONCEPT.COMMON.NO_MATCH
}
return m.get(x, m.get('default'))
operations = [
# specimen source id and external_id
keep_map(
in_col='SAMPLE_ID',
out_col=OMOP.SPECIMEN.ID
),
# specimen source value
keep_map(
in_col="BODY_SITE",
out_col=OMOP.SPECIMEN.SOURCE_VALUE
),
# specimen concept id
value_map(
in_col="BODY_SITE",
m=lambda x: body_site_map(x),
out_col=OMOP.SPECIMEN.CONCEPT_ID
),
# specimen type concept id
constant_map(
m=omop_constants.CONCEPT.COMMON.NO_MATCH,
out_col=OMOP.SPECIMEN.TYPE.CONCEPT_ID
),
# specimen anatomic concept id
constant_map(
m=omop_constants.CONCEPT.COMMON.NO_MATCH,
out_col=OMOP.SPECIMEN.ANATOMIC_SITE.CONCEPT_ID
),
# specimen anatomic source value
constant_map(
m=kf_constants.COMMON.NOT_REPORTED,
out_col=OMOP.SPECIMEN.ANATOMIC_SITE.SOURCE_VALUE
),
# specimen disease status concept id
constant_map(
m=omop_constants.CONCEPT.COMMON.NO_MATCH,
out_col=OMOP.SPECIMEN.DISEASE_STATUS.CONCEPT_ID
),
# specimen disease status source value
constant_map(
m=kf_constants.COMMON.NOT_REPORTED,
out_col=OMOP.SPECIMEN.DISEASE_STATUS.SOURCE_VALUE
),
]
|
kids-first/kf-omop-imports | cbttc_proteomics/extract_configs/chemotherapy.py | <reponame>kids-first/kf-omop-imports
# flake8: noqa
import datetime
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map,
row_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common import athena
from common.concept_schema import OMOP
from common.athena import athena_cache
source_data_url = (
'file://~/Projects/kids_first/data/CBTTC/proteomics/cbttc-proteomics.xlsx'
)
def post_load(df):
return df[df['chemo'] == 'Yes']
source_data_loading_parameters = {
'sheet_name': 'All Fields - Included - 11_05_2',
'do_after_load': post_load
}
def procedure_occur_id(row):
external_id = ''
components = ['research_id', 'chemo_type',
'start_age_chemo', 'stop_age_chemo']
external_id = '-'.join([f'{col}:{str(row[col])}' for col in components])
return external_id
def chemo_start(row):
x = row['start_age_chemo'].lower()
    if ('unavailable' not in x) and ('not applicable' not in x):
value = str(datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(x) - 1))
else:
value = str(
datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(row['age_of_initial_diagnosis']) - 1))
return value
operations = [
# procedure external_id
row_map(
m=lambda row: procedure_occur_id(row),
out_col=OMOP.PROCEDURE.ID
),
# person external_id
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.ID
),
# procedure source value
row_map(
m=lambda row: ('Chemotherapy ' +
':'.join([f'{col}: {row[col]}'
for col in ['chemo_type',
'like_protocol_name',
'Chemo_agents',
'Formulation']]
)
),
out_col=OMOP.PROCEDURE.SOURCE_VALUE
),
# procedure concept id
constant_map(
m=athena_cache.lookup('chemotherapy',
query_params={'domain': 'Procedure',
'standardConcept': 'Standard',
'conceptClass': 'Procedure'}),
out_col=OMOP.PROCEDURE.CONCEPT_ID
),
# procedure type
constant_map(
m=athena_cache.lookup('radiation',
query_params={'standardConcept': 'Standard'}),
out_col=OMOP.PROCEDURE.TYPE.CONCEPT_ID
),
# procedure_start_datetime
# consider epoch to be person's year of birth
row_map(
m=lambda row: chemo_start(row),
out_col=OMOP.PROCEDURE.DATETIME
),
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.PROCEDURE.MODIFIER.CONCEPT_ID
),
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.PROCEDURE.SOURCE_CONCEPT_ID
)
]
|
kids-first/kf-omop-imports | cbttc_proteomics/extract_configs/outcome.py | # flake8: noqa
import datetime
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map,
row_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common import athena
from common.concept_schema import OMOP
from common.athena import athena_cache
source_data_url = (
'file://~/Projects/kids_first/data/CBTTC/proteomics/cbttc-proteomics.xlsx'
)
source_data_loading_parameters = {
'sheet_name': 'All Fields - Included - 11_05_2'
}
def observation_id(row):
external_id = ''
components = ['research_id', 'age_of_last_known_status',
'last_known_status']
external_id = '-'.join([f'{col}:{str(row[col])}' for col in components])
return external_id
def vital_status(x):
if 'alive' in x.lower():
value = athena_cache.lookup(x.split('-')[0])
else:
value = omop_constants.CONCEPT.OUTCOME.VITAL_STATUS.DECEASED
return value
operations = [
# observation external_id
row_map(
m=lambda row: observation_id(row),
out_col=OMOP.OBSERVATION.ID
),
# person external_id
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.ID
),
# observation source value
keep_map(
in_col='last_known_status',
out_col=OMOP.OBSERVATION.SOURCE_VALUE
),
# observation concept id
value_map(
in_col='last_known_status',
m=lambda x: int(vital_status(x)),
out_col=OMOP.OBSERVATION.CONCEPT_ID
),
# observation_start_datetime
# consider epoch to be person's year of birth
value_map(
in_col='age_of_last_known_status',
m=lambda x: str(datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(x) - 1)),
out_col=OMOP.OBSERVATION.DATETIME
),
# observation_type_concept_id
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.OBSERVATION.TYPE.CONCEPT_ID
),
# observation source concept id
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.OBSERVATION.SOURCE_CONCEPT_ID
),
    # obs_event_field_concept_id (purpose unclear in this mapping; defaulted to UNAVAILABLE)
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.OBSERVATION.EVENT_FIELD.CONCEPT_ID
)
]
|
kids-first/kf-omop-imports | common/delete.py | """
Delete stuff
"""
import os
import logging
from kf_lib_data_ingest.common.misc import read_json
from kf_model_omop.factory import scoped_session
from kf_model_omop.model import models
logger = logging.getLogger(__name__)
ordered_model_list = ['Person', 'Speciman', 'Observation',
'ConditionOccurrence',
'ProcedureOccurrence']
def delete_all(session):
"""
Delete all instances of models in df_dict's keys
"""
logger.info('Deleting all previously loaded OMOP instances')
for model_cls_name in reversed(ordered_model_list):
model_cls = getattr(models, model_cls_name)
results = session.query(model_cls).all()
logger.info(f'{len(results)} {model_cls.__name__} deleted')
for r in results:
session.delete(r)
session.commit()
def drop_study(session, id_cache):
"""
Delete all study entities
"""
model_name_list = reversed(ordered_model_list)
for model_cls_name in model_name_list:
# Lookup model class by name
model_cls = getattr(models, model_cls_name)
# Primary Keys
        primary_keys = id_cache.get(model_cls_name, {})
if (not primary_keys) or (session.query(model_cls).count() == 0):
logger.info(f'0 {model_cls_name} found, nothing to delete!')
continue
# Query for obj then delete it
for key in primary_keys.values():
instance = session.query(model_cls).get(key)
if instance:
logger.info(f'Deleting {model_cls_name} {key} ...')
session.delete(instance)
session.commit()
def run(study_dir):
"""
Entry point
"""
logger.info(f'Deleting study {study_dir} entities')
# Read id cache
id_cache = {}
id_cache_filepath = os.path.join(study_dir, 'id_cache.json')
if os.path.isfile(id_cache_filepath):
id_cache = read_json(id_cache_filepath)
# Use the context managed session to interact with DB
with scoped_session() as session:
drop_study(session, id_cache)
|
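drop_study expects id_cache.json to map each model class name to a dict whose values are database primary keys. A hedged sketch of the assumed shape (the external IDs and keys below are made up):

# id_cache.json, expressed as a Python dict (assumed shape)
id_cache = {
    'Person': {'P001': 101, 'P002': 102},   # external_id -> primary key
    'Speciman': {'BS-0001': 501},
}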
kids-first/kf-omop-imports | common/extract.py | import os
from kf_lib_data_ingest.etl.extract.extract import ExtractStage
from common.athena import athena_cache
def run(output_dir, extract_configs):
# Make output dir
output_dir = os.path.join(output_dir, 'extract')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Run
es = ExtractStage(output_dir, extract_configs)
df_out = es.run()
# Write output files
for url, (_, df) in df_out.items():
fname = os.path.split(url)[1].split('.')[0]
output_path = os.path.join(output_dir, (fname + '.tsv'))
df.to_csv(output_path, sep='\t')
# Write athena cache to file
athena_cache.write_cache()
return df_out
|
kids-first/kf-omop-imports | cli.py | <filename>cli.py<gh_stars>0
import os
import click
from kf_lib_data_ingest.etl.configuration.log import setup_logger
from kf_lib_data_ingest.common.misc import import_module_from_file
from kf_model_omop.utils.misc import time_it
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
"""
Simple CLI for ingesting data into the Kids First OMOP db
"""
pass
@click.command(name='ingest')
@click.argument('study_dir',
type=click.Path(file_okay=False, dir_okay=True))
@click.option('--log_level', type=click.Choice(['debug', 'info',
'warning', 'error']))
@time_it
def ingest(study_dir, log_level):
"""
Ingest a study into the Kids First OMOP db
\b
Arguments:
\b
study_dir - Path to study directory containing extract_configs dir
"""
from common import etl
# Setup logging
output_dir = _setup_output_and_logging(study_dir, log_level)
# Get transformer
study_transform = import_module_from_file(os.path.join(
os.path.abspath(study_dir), 'transform.py'))
# Run etl
etl.run(study_dir, output_dir, study_transform.transform)
@click.command(name='drop')
@click.argument('study_dir',
type=click.Path(file_okay=False, dir_okay=True))
@time_it
def drop(study_dir):
"""
Drop a study in the Kids First OMOP db
\b
Arguments:
\b
study_dir - Path to study directory containing the id_cache.json file
for the study
"""
from common import delete
# Setup logging
_setup_output_and_logging(study_dir)
# Drop
delete.run(os.path.abspath(study_dir))
def _setup_output_and_logging(study_dir, log_level=None):
# Create output directory for caching stage outputs
output_dir = os.path.join(study_dir, 'output')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Logger
kwargs = {
'overwrite_log': True
}
if log_level:
import logging
log_level = getattr(logging, log_level.upper())
kwargs.update({'log_level': log_level})
setup_logger(output_dir, **kwargs)
return output_dir
cli.add_command(ingest)
cli.add_command(drop)
|
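Assuming the module is invoked directly, typical usage might look like this (study paths are hypothetical):

# python cli.py ingest studies/cbttc_proteomics --log_level info
# python cli.py drop studies/cbttc_proteomics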
kids-first/kf-omop-imports | common/concept_schema.py | <gh_stars>0
"""
OMOP concept schema
"""
from kf_lib_data_ingest.etl.transform.standard_model.concept_schema import (
PropertyMixin,
_set_cls_attrs
)
class OmopMixin(PropertyMixin):
SOURCE_VALUE = None
SOURCE_CONCEPT_ID = None
CONCEPT_ID = None
class TimeMixin:
START_DATETIME = None
STOP_DATETIME = None
DATETIME = None
class OMOP:
class PERSON(OmopMixin):
pass
class OBSERVATION(OmopMixin, TimeMixin):
class EVENT_FIELD(OmopMixin):
pass
class TYPE(OmopMixin):
pass
class PROCEDURE(OmopMixin, TimeMixin):
class MODIFIER(OmopMixin):
pass
class TYPE(OmopMixin):
pass
class CONDITION(OmopMixin):
DATETIME = None
class STATUS(OmopMixin):
pass
class TYPE(OmopMixin):
pass
class SPECIMEN(OmopMixin, TimeMixin):
class DISEASE_STATUS(OmopMixin):
pass
class ANATOMIC_SITE(OmopMixin):
pass
class TYPE(OmopMixin):
pass
class ETHNICITY(OmopMixin):
pass
class RACE(OmopMixin):
pass
class GENDER(OmopMixin):
pass
class MEASUREMENT:
YEAR_OF_BIRTH = None
def compile_schema():
"""
"Compile" the concept schema
Populate every concept class attribute with a string that represents
a path in the concept class hierarchy to reach that attribute.
Store all the concept property strings in a set for later reference and
validation.
This approach eliminates the need to manually assign concept class
attributes to a string.
"""
property_path = []
property_paths = set()
_set_cls_attrs(OMOP, None, property_path, property_paths)
return property_paths
compile_schema()
|
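After compile_schema runs, each leaf attribute holds a path string instead of None. The exact delimiter is decided by _set_cls_attrs in kf_lib_data_ingest, so the values below are assumptions about the general shape, not verified output:

# Hypothetical post-compilation values (delimiter and casing assumed)
OMOP.PERSON.ID                  # e.g. 'PERSON|ID'
OMOP.PROCEDURE.TYPE.CONCEPT_ID  # e.g. 'PROCEDURE|TYPE|CONCEPT_ID'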
kids-first/kf-omop-imports | cbttc_proteomics/extract_configs/diagnosis.py | <filename>cbttc_proteomics/extract_configs/diagnosis.py
# flake8: noqa
import datetime
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map,
row_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common import athena
from common.concept_schema import OMOP
from common.athena import athena_cache
source_data_url = (
'file://~/Projects/kids_first/data/CBTTC/proteomics/cbttc-proteomics.xlsx'
)
source_data_loading_parameters = {
'sheet_name': 'All Fields - Included - 11_05_2'
}
def condition_occur_id(row):
    components = ['research_id', 'sample_subject_name',
                  'diagnosis', 'diagnosis_type', 'age_of_initial_diagnosis',
                  'tumor_primary_location_in']
    return '-'.join([f'{col}:{str(row[col])}' for col in components])
def condition_status(x):
if 'initial' in x.lower():
value = omop_constants.CONCEPT.DIAGNOSIS.PRELIMINARY
elif 'progressive' in x.lower():
value = omop_constants.CONCEPT.DIAGNOSIS.PROGRESSIVE
elif 'recurrence' in x.lower():
value = omop_constants.CONCEPT.DIAGNOSIS.RECURRENCE
else:
value = omop_constants.CONCEPT.COMMON.NO_MATCH
return value
operations = [
# condition_occurrence external_id
row_map(
m=lambda row: condition_occur_id(row),
out_col=OMOP.CONDITION.ID
),
# person external_id
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.ID
),
# condition source value
keep_map(
in_col='diagnosis',
out_col=OMOP.CONDITION.SOURCE_VALUE
),
# condition concept id
value_map(
in_col='diagnosis',
m=lambda x: int(athena_cache.lookup(
x.split('/')[0].split('(')[0].strip(),
query_params={'domain': 'Condition'})),
out_col=OMOP.CONDITION.CONCEPT_ID
),
# condition source concept id
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.CONDITION.SOURCE_CONCEPT_ID
),
# condition_start_datetime
# consider epoch to be person's year of birth
value_map(
in_col='age_of_initial_diagnosis',
m=lambda x: str(datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(x) - 1)),
out_col=OMOP.CONDITION.DATETIME
),
# condition_type_concept_id
value_map(
in_col='grade',
m=lambda x: (omop_constants.CONCEPT.COMMON.UNAVAILABLE
if x.lower() == 'unavailable' else
int(athena_cache.lookup(x.split('/')[0]))
),
out_col=OMOP.CONDITION.TYPE.CONCEPT_ID
),
# condition_status_source_value
keep_map(
in_col='diagnosis_type',
out_col=OMOP.CONDITION.STATUS.SOURCE_VALUE
),
# condition_status_concept_id
value_map(
in_col='diagnosis_type',
m=lambda x: condition_status(x),
out_col=OMOP.CONDITION.STATUS.CONCEPT_ID
)
]
|
kids-first/kf-omop-imports | villain_2015/transform.py | <gh_stars>0
from common.concept_schema import OMOP
from common.utils import merge_without_duplicates
def transform(dfs):
# Make dataframes
# Person
persons = dfs['NICHD_GMKF_DSD']
# Specimens
specimens = dfs['3a_sample_attributes']
specimens = merge_without_duplicates(persons, specimens,
on=OMOP.SPECIMEN.ID)
specimens = specimens.drop_duplicates(OMOP.SPECIMEN.ID)
df_out = {
'Person': persons,
'Speciman': specimens
}
return df_out
|
kids-first/kf-omop-imports | common/utils.py | <filename>common/utils.py
import inspect
from sqlalchemy.inspection import inspect as sqlainspect
import pandas as pd
from kf_model_omop.model import models
def merge_without_duplicates(left, right, left_key='Left',
right_key='Right', **kwargs):
"""
Merge two DataFrames and remove duplicate columns resulting from merge
A merge of two DataFrames can result in duplicate columns suffixed with
_x and _y. Choose the column with the most values.
"""
df = None
# Ensure both DataFrames exist
error_messages = []
    if not isinstance(left, pd.DataFrame) or left.empty:
        error_messages.append('{} DataFrame is {}.'.format(left_key, left))
    if not isinstance(right, pd.DataFrame) or right.empty:
        error_messages.append('{} DataFrame is {}.'.format(right_key, right))
# One or both DataFrames do not exist
if error_messages:
msg = 'Warning: Could not merge DataFrames. '
additional_msgs = ' '.join(error_messages)
print(msg + additional_msgs)
return df
# Save duplicate columns
duplicate_cols = set(list(left.columns)).intersection(list(right.columns))
# Merge
try:
df = pd.merge(left, right, **kwargs)
# One of the DataFrames did not have the merge_col
except KeyError as e:
missing_col = str(e)
bad_df = (left_key
if missing_col not in set(list(left.columns))
else right_key)
print('Warning: Could not merge {0} DataFrame with {1} DataFrame '
'on column "{2}". "{2}" not found in columns of {3}'
.format(left_key, right_key, missing_col, bad_df))
raise e
# No common columns found to merge on
except pd.errors.MergeError as e:
print('Warning: Could not merge {0} DataFrame with {1} DataFrame. '
'No common columns to perform merge on.'.format(left_key,
right_key))
raise e
# Replace NaN values with None
df = df.where((pd.notnull(df)), None)
for prefix in duplicate_cols:
# Get duplicate columns
col_x = prefix + '_x'
col_y = prefix + '_y'
if not ((col_x in df) and (col_y in df)):
continue
# Keep column with greatest # of unique values (excluding NaN/None)
unique_vals_x = set(df[col_x].unique())
unique_vals_x.discard(None)
unique_vals_y = set(df[col_y].unique())
unique_vals_y.discard(None)
if len(unique_vals_x) >= len(unique_vals_y):
keep_col = col_x
drop_col = col_y
else:
keep_col = col_y
drop_col = col_x
# Reduce two duplicate columns to one
df.drop(drop_col, axis=1, inplace=True)
df.rename(columns={keep_col: prefix}, inplace=True)
return df
def _get_live_classes(module, full_module_name):
"""
Return list of classes that are defined in the python module
:param: an imported Python module
:param: fully qualified name of Python module (ie. my_package.my_module)
"""
return [getattr(module, m[0])
for m in inspect.getmembers(module, inspect.isclass)
if m[1].__module__ == full_module_name
]
def _make_omop_schema():
"""
Build a dictionary, where keys are OMOP sqlalchemy model names and values
are dictionaries of the model's attributes.
Example:
omop_schema = {
'CareSite': {
'care_site_id': None,
'care_site_name': None
},
...
}
"""
omop_schema = {}
classes = _get_live_classes(models, 'kf_model_omop.model.models')
for cls in classes:
d = {'_links': {},
'_primary_key': {}}
for c in sqlainspect(cls).columns:
if c.foreign_keys and (not c.primary_key):
d['_links'][c.name] = None
elif c.primary_key:
d['_primary_key'][c.name] = None
else:
d[c.name] = None
omop_schema[cls.__name__] = d
return omop_schema
|
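A usage sketch of merge_without_duplicates with toy frames; the duplicated 'site' column is collapsed back to a single column, keeping whichever _x/_y variant has more unique values:

import pandas as pd
# from common.utils import merge_without_duplicates

left = pd.DataFrame({'id': [1, 2], 'site': ['A', None]})
right = pd.DataFrame({'id': [1, 2], 'site': ['A', 'B']})
merged = merge_without_duplicates(left, right, on='id')
# merged has a single 'site' column, taken from the right frame
# (2 unique values vs 1 in the left frame)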
kids-first/kf-omop-imports | cbttc_proteomics/extract_configs/specimen.py | <filename>cbttc_proteomics/extract_configs/specimen.py
# flake8: noqa
import logging
import datetime
import pandas as pd
from kf_lib_data_ingest.etl.extract.operations import (
value_map,
keep_map,
constant_map,
melt_map
)
from kf_lib_data_ingest.common import constants as kf_constants
from common import constants as omop_constants
from common.concept_schema import OMOP
from common.athena import athena_cache
logger = logging.getLogger(__name__)
source_data_url = (
'file://~/Projects/kids_first/data/CBTTC/proteomics/cbttc-proteomics.xlsx'
)
def post_load(df):
# reshape
value_vars = ['normal_dna_biospecimen_id',
'tumor_dna_biospecimen_id',
'tumor_rna_biospecimen_id']
id_vars = set(df.columns) - set(value_vars)
new_df = pd.melt(df, id_vars=id_vars,
value_vars=value_vars,
var_name='disease_status',
value_name='specimen_id')
return new_df
source_data_loading_parameters = {
'sheet_name': 'All Fields - Included - 11_05_2',
'do_after_load': post_load
}
def anatomic_site(x):
x = x.strip()
if 'Brain Stem- Midbrain' in x:
value = omop_constants.CONCEPT.SPECIMEN.ANATOMIC_SITE.BRAIN_STEM_PART
elif 'Spinal Cord- Thoracic' in x:
value = (
omop_constants.CONCEPT.SPECIMEN.ANATOMIC_SITE.SPINAL_CORD.THORACIC
)
elif 'Spinal Cord- Cervical' in x:
value = (
omop_constants.CONCEPT.SPECIMEN.ANATOMIC_SITE.SPINAL_CORD.CERVICAL
)
    elif 'Spinal Cord- Lumbar' in x:
        # NOTE: lumbar is currently mapped to the cervical spinal cord concept
        value = (
            omop_constants.CONCEPT.SPECIMEN.ANATOMIC_SITE.SPINAL_CORD.CERVICAL
        )
else:
value = athena_cache.lookup(x.split('/')[0].split(',')[0])
return value
operations = [
    # specimen external id
keep_map(
in_col='specimen_id',
out_col=OMOP.SPECIMEN.ID
),
# specimen source value
keep_map(
in_col='sample_subject_name',
out_col=OMOP.SPECIMEN.SOURCE_VALUE
),
# person external_id
keep_map(
in_col='research_id',
out_col=OMOP.PERSON.ID
),
# specimen concept id
constant_map(
m=omop_constants.CONCEPT.SPECIMEN.COMPOSITION.BRAIN_TISSUE,
out_col=OMOP.SPECIMEN.CONCEPT_ID
),
# specimen type concept id
constant_map(
m=omop_constants.CONCEPT.COMMON.UNAVAILABLE,
out_col=OMOP.SPECIMEN.TYPE.CONCEPT_ID
),
# specimen anatomic concept id
value_map(
in_col='tumor_primary_location_in',
m=lambda x: int(anatomic_site(x)),
out_col=OMOP.SPECIMEN.ANATOMIC_SITE.CONCEPT_ID
),
# specimen anatomic source value
value_map(
in_col='tumor_primary_location_in',
m=lambda x: x[:50],
out_col=OMOP.SPECIMEN.ANATOMIC_SITE.SOURCE_VALUE
),
# specimen disease status source value
value_map(
in_col='disease_status',
m=lambda x: '-'.join(x.split('_')[0:2]),
out_col=OMOP.SPECIMEN.DISEASE_STATUS.SOURCE_VALUE
),
# specimen disease status concept id
value_map(
in_col='disease_status',
m=lambda x: (omop_constants.CONCEPT.DISEASE_STATUS.NORMAL
if 'normal' in x else
omop_constants.CONCEPT.DISEASE_STATUS.ABNORMAL),
out_col=OMOP.SPECIMEN.DISEASE_STATUS.CONCEPT_ID
),
# specimen datetime, assume birthdate of person is epoch
value_map(
in_col='age_of_initial_diagnosis',
m=lambda x: str(datetime.datetime.fromtimestamp(0) +
datetime.timedelta(float(x) - 1)),
out_col=OMOP.SPECIMEN.DATETIME
)
]
|
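The post_load hook above reshapes one wide row with three biospecimen-ID columns into three long rows. A toy illustration of the same pd.melt call (values invented):

import pandas as pd

df = pd.DataFrame({
    'research_id': ['P1'],
    'normal_dna_biospecimen_id': ['BS-N1'],
    'tumor_dna_biospecimen_id': ['BS-T1'],
    'tumor_rna_biospecimen_id': ['BS-R1'],
})
long_df = pd.melt(df, id_vars=['research_id'],
                  value_vars=['normal_dna_biospecimen_id',
                              'tumor_dna_biospecimen_id',
                              'tumor_rna_biospecimen_id'],
                  var_name='disease_status', value_name='specimen_id')
# 3 rows: one per biospecimen column, with the source column name
# carried along in 'disease_status'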
kids-first/kf-omop-imports | common/transform.py | <filename>common/transform.py
import os
import logging
logger = logging.getLogger(__name__)
def write_output(output_dir, df_out):
"""
Write transform stage output
"""
# Make stage output dir
output_dir = os.path.join(output_dir, 'transform')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# Write dfs to files
for model_cls_name, df in df_out.items():
fp = os.path.join(output_dir, model_cls_name + '.tsv')
df.to_csv(fp, sep='\t')
def run(output_dir, data_dict, transform_func):
"""
Transform mapped data tables into merged dataframes, one df for each
target entity type. transform_func must return a dict where keys are names
of OMOP SQLAlchemy models:
df_out = {
'Person': person_df,
'Speciman': specimen_df
}
:param output_dir: Study directory's output dir
:param data_dict: Dict of mapped dfs from extract stage
:transform_funct: function pointer to a method that merges dfs from
data_dict into the form described above.
"""
logger.info('BEGIN TransformStage')
# Reorganize data - Dict (key=filename, value=df)
dfs = {os.path.split(k)[1].split('.')[0]: df
for k, (_, df) in data_dict.items()
}
# Make dataframes
df_out = transform_func(dfs)
write_output(output_dir, df_out)
logger.info('END TransformStage')
return df_out
|
NoOneZero/Neuro | common/geneticAlgorithmParams.py | class GeneticAlgorithmParams:
list_start_population = [16, 64, 256, 1024, 4096]
list_count_elitism = [0, 0.001, 0.01, 0.1, 0.2]
list_count_of_alive_after_epoch = [0.03125, 0.0625, 0.125, 0.25, 0.5]
list_random_kid = [0, 0.001, 0.01, 0.1, 0.2]
list_mutation_probability = [0, 0.0001, 0.001, 0.01, 0.1]
list_type_selection = ["roulette", "tournament", "rank", "proportional"]
list_type_make_new_population = ["cross 1", "cross 2", "cross 3", "cross 4", "random", "lineral", "one father"]
list_type_breeding = ["simple", "outbreeding genotype", "outbreeding phenotype", "inbreeding genotype", "inbreeding phenotype", "adaptive+1 breeding type gen", "not family breeding"]
def __init__(self) -> None:
        self._max_iteration = 1000  # Number of cycles in one epoch
        self._max_epoch = 1000  # Number of epochs
        self._start_population = GeneticAlgorithmParams.list_start_population[4]  # Total population size
        self._count_elitism = GeneticAlgorithmParams.list_count_elitism[0]
        self._count_of_alive_after_epoch = int(self._start_population * GeneticAlgorithmParams.list_count_of_alive_after_epoch[4])  # Number of survivors after the previous round ends
        self._random_kid = GeneticAlgorithmParams.list_random_kid[0]
        self._mutation_probability = GeneticAlgorithmParams.list_mutation_probability[1]  # Probability of mutating a gene, in [0, 1]
self._type_selection = GeneticAlgorithmParams.list_type_selection[0]
self._type_make_new_population = GeneticAlgorithmParams.list_type_make_new_population[1]
self._type_breeding = GeneticAlgorithmParams.list_type_breeding[0]
self._count_web_layers()
def _count_web_layers(self):
self._web_layers = [6, 4] # [6+9, 6, 4]#[6+3*3,4]#[6,4]#[6,6,4]
def get_max_epoch(self): return self._max_epoch
def get_max_iteration(self): return self._max_iteration
def get_start_population(self): return self._start_population
def get_count_elitism(self): return self._count_elitism
def get_count_of_alive_after_epoch(self): return self._count_of_alive_after_epoch
def get_random_kid(self): return self._random_kid
def get_mutation_probability(self): return self._mutation_probability
def get_type_selection(self): return self._type_selection
def get_type_make_new_population(self): return self._type_make_new_population
def get_type_breading(self): return self._type_breeding
    def get_crossover_point_number(self):
        # _type_make_new_population holds a string from list_type_make_new_population
        if self._type_make_new_population == "cross 1": return 1
        elif self._type_make_new_population == "cross 2": return 2
        elif self._type_make_new_population == "cross 3": return 3
        elif self._type_make_new_population == "cross 4": return 4
        else: return 0
def get_web_layers(self): return self._web_layers
|
NoOneZero/Neuro | common/person.py | <reponame>NoOneZero/Neuro<gh_stars>0
import pygame
from random import randint
class Person:
    def __init__(self, position: list = None, color: list = None, size: int = 20, default_position_range=None) -> None:
self.default_position_range = default_position_range
self.pygame = pygame
self.init(position,color,size)
super().__init__()
    def init(self, position: list = None, color: list = None, size: int = 20):
self.position = position or [0,0,0,0,0,0]
self.speed = [0, 0, 0, 0, 0, 0]
self.color = color or [255,255,255]
self.size = size or 20
def move(self):
for i in range(len(self.position)):
self.position[i] += self.speed[i]
if self.position[i] > 1000000: self.position[i] = 1000000
if self.position[i] < -1000000: self.position[i] = -1000000
def reset_position(self):
if self.default_position_range != None:
for i in range(len(self.position)):
self.position[i] = randint(self.default_position_range[0][i // 2][i % 2],
self.default_position_range[1][i // 2][i % 2])
def draw(self, display):
self.pygame.draw.circle(display,
self.color,
[int(self.position[0]), int(self.position[1])],
int(self.size))
|
NoOneZero/Neuro | common/character.py | from common.web import Web
from common.person import Person
from common.geneticAlgorithmParams import GeneticAlgorithmParams
import random
class Character:
counter = 0
characters_all = []
def __init__(self, person: Person=None, web=None, default_position_range=None) -> None:
self.person = person or Person(default_position_range=default_position_range)
self.web = web or Web(randomize_power=1)
self.fitness = 0
        self.adaptive_params = None  # TODO: adaptive breeding parameters are not implemented yet
Character.counter += 1
Character.characters_all.append(self)
def add_fitness(self, value): self.fitness += value
def reset_fitness(self): self.fitness = 0
def get_fitness(self): return self.fitness
def reset_person(self): pass
def move(self):
self.person.move()
def draw(self, display):
self.person.draw(display)
@staticmethod
def calculate_new_population(genetic_algorithm_params: GeneticAlgorithmParams):
        Character.characters_all = sorted(Character.characters_all, key=lambda x: x.fitness, reverse=True)  # sort from highest fitness to lowest
        # Normalize so that all fitness values sum to 1
suma = 0
for i in range(len(Character.characters_all)):
if Character.characters_all[i].fitness <= 0:
Character.characters_all[i].fitness = 0.00001
suma += Character.characters_all[i].fitness
for i in range(len(Character.characters_all)):
Character.characters_all[i].fitness /= suma
# Character.characters_all[i].web.print_neuro_chain()
print(suma/len(Character.characters_all)) # fixme del after test
        count_of_alive = genetic_algorithm_params.get_count_of_alive_after_epoch()  # number of survivors from the previous generation
        web = []  # original list of networks
        fitness = []  # original list of fitness values
web_temp = []
fitness_temp = []
for i in range(len(Character.characters_all)):
web.append(Character.characters_all[i].web)
web_temp.append(Character.characters_all[i].web)
fitness.append(Character.characters_all[i].fitness)
fitness_temp.append(Character.characters_all[i].fitness)
        web_new = []  # networks for the new generation
        fitness_new = []  # their fitness values
for i in range(min(count_of_alive, len(fitness))):
n = random.choices(range(len(web_temp)), fitness_temp, k=1)[0]
web_new.append(web_temp.pop(n))
fitness_new.append(fitness_temp.pop(n))
        # Create offspring and fill the remaining slots
        # (the original for-loop appended two children per iteration and its
        # manual "i += 1" had no effect, so it could overshoot the population)
        while len(web_new) < len(web):
            parents_web = random.choices(web, fitness, k=2)  # pick two parents, weighted by fitness
            couple = parents_web[0].cross_crossover_several(parents_web[1],
                                                            point_number=genetic_algorithm_params.get_crossover_point_number(),
                                                            return_couple=True)  # produce two children
            web_new.append(couple[0])
            if len(web_new) < len(web):
                web_new.append(couple[1])
        # Mutations
for i in range(len(web_new)):
web_new[i].make_mutation(randomize_probability=genetic_algorithm_params.get_mutation_probability())
        # Update every character: swap in the new network, reset fitness and start position
for i in range(len(Character.characters_all)):
Character.characters_all[i].web = web_new[i]
Character.characters_all[i].fitness = 0
Character.characters_all[i].person.reset_position()
|
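The survivor step above is fitness-proportional sampling without replacement via random.choices. A standalone sketch of the same pattern with made-up values:

import random

webs = ['w0', 'w1', 'w2', 'w3']
weights = [0.4, 0.3, 0.2, 0.1]   # normalized fitness values
survivors = []
for _ in range(2):               # keep two survivors
    n = random.choices(range(len(webs)), weights, k=1)[0]
    survivors.append(webs.pop(n))
    weights.pop(n)
print(survivors)                 # e.g. ['w0', 'w2']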
NoOneZero/Neuro | common/monitor.py | from pygame import time
import pygame
import os
class Monitor:
def __init__(self) -> None:
super().__init__()
self.show_picture = True
self.display_position = (50, 50)
self.display_size = (3400, 1200)
self.display_color = (48, 189, 221)
self.fps_default = 240
self.is_show_info: bool = True
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % self.display_position
pygame.init()
self.display = pygame.display.set_mode(self.display_size)
self.display.fill(self.display_color)
self.clock = time.Clock()
self.clock.tick(self.fps_default)
def control_input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit() # sys.exit() if sys is imported
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_DOWN: self.show_picture = False
if event.key == pygame.K_UP: self.show_picture = True
if event.key == pygame.K_LEFT: pass
if event.key == pygame.K_RIGHT: pass
if event.key == pygame.K_SPACE: pass
if event.key == pygame.K_z: pass
if event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN: pass
if event.key == pygame.K_UP: pass
if event.key == pygame.K_LEFT: pass
if event.key == pygame.K_RIGHT: pass
if event.key == pygame.K_SPACE: pass
if event.key == pygame.K_z: pass
def draw(self, environment: list = [], enemies: list = [], character: list = []):
self._draw_fill_display()
if self.show_picture:
self._draw_all(environment=environment, enemies=enemies, character=character)
self._draw_flip()
def _draw_fill_display(self): self.display.fill(self.display_color)
def _draw_all(self, environment: list = [], enemies: list = [], character: list = []):
for i in range(len(environment)): environment[i].draw(self.display)
for i in range(len(enemies)): enemies[i].draw(self.display)
for i in range(len(character)): character[i].draw(self.display)
def _draw_flip(self):
pygame.display.flip()
self.clock.tick(self.fps_default)
def write_data_on_screen(self, text:str): pygame.display.set_caption(text)
|
NoOneZero/Neuro | else/Scene.py | from pygame import time
import pygame
import os
import config
class Scene:
def __init__(self):
self.init_display()
self.init_common()
self.init_battlefield()
self.init_character()
self.read_data()
self.save_data()
def init_display(self):
self.display_position = config.DISPLAY_POSITION
self.display_size = config.DISPLAY_SIZE
self.display_color = config.DISPLAY_COLOR
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % self.display_position
pygame.init()
self.display = pygame.display.set_mode(self.display_size)
self.display.fill(self.display_color)
self.fps_default = config.FPS
self.clock = time.Clock()
def init_common(self):
self.run_iteration = True
self.run_epoch = True
self.iteration = 0
self.epoch = 0
self.population = config.START_POPULATION
self.max_counter = config.MAX_COUNTER
self.max_epoch = config.MAX_EPOCH
def init_battlefield(self): pass
def init_character(self): pass
def read_data(self): pass
def loop(self):
while (self.run_epoch):
while (self.run_iteration):
self.buttons()
self.rule()
self.update()
self.draw()
self.clock_tick()
self.count_counter()
self.exit_iteration()
self.evolution_operation()
self.save_data()
self.start_new_epoch()
self.exit_epoch()
pygame.quit()
def buttons(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit() # sys.exit() if sys is imported
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_DOWN: self._action_k_down_press()
if event.key == pygame.K_UP: self._action_k_up_press()
if event.key == pygame.K_LEFT: self._action_k_left_press()
if event.key == pygame.K_RIGHT: self._action_k_right_press()
if event.key == pygame.K_SPACE: self._action_k_space_press()
if event.key == pygame.K_z: self._action_k_z_press()
if event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN: self._action_k_down_released()
if event.key == pygame.K_UP: self._action_k_up_released()
if event.key == pygame.K_LEFT: self._action_k_left_released()
if event.key == pygame.K_RIGHT: self._action_k_right_released()
if event.key == pygame.K_SPACE: self._action_k_space_released()
if event.key == pygame.K_z: self._action_k_z_released()
def evolution_operation(self): pass
def save_data(self): pass
def start_new_epoch(self):
self.run_iteration = True
self.iteration = 0
self.epoch += 1
def exit_epoch(self):
        if self.epoch >= self.max_epoch:
self.run_epoch = False
def rule(self): pass
def update(self): pass
def draw(self):
self._draw_fill_display()
self._draw_all()
self._draw_flip()
def clock_tick(self): self.clock.tick(self.fps_default)
def count_counter(self): self.iteration += 1
def exit_iteration(self):
if self.iteration >= self.max_counter:
self.run_iteration = False
def _draw_fill_display(self): self.display.fill(self.display_color)
def _draw_all(self): pass
def _draw_flip(self): pygame.display.flip()
def _action_k_down_press(self): pass
def _action_k_up_press(self): pass
def _action_k_left_press(self): pass
def _action_k_right_press(self): pass
def _action_k_space_press(self): pass
def _action_k_z_press(self): pass
def _action_k_down_released(self): pass
def _action_k_up_released(self): pass
def _action_k_left_released(self): pass
def _action_k_right_released(self): pass
def _action_k_space_released(self): pass
def _action_k_z_released(self): pass |
NoOneZero/Neuro | common/web.py | <reponame>NoOneZero/Neuro
from common.neuron import Neuron
import random
class Web:
counter = 0
def __init__(self, layers: list=[2, 3, 2], web=None, randomize_power: float =None):
if type(web) == Web: self.layers = list(web.layers)
else: self.layers = layers or [1]
self.neurons = [[]]
for j in range(self.layers[0]):
self.neurons[0].append(Neuron())
for i in range(1, len(self.layers)):
self.neurons.append([])
for j in range(self.layers[i]):
self.neurons[i].append(Neuron(fathers=self.neurons[i - 1], randomize_power=randomize_power))
if type(web) == Web: self.set_weigh_from_else_web(web=web)
self.number = Web.counter
Web.counter += 1
def print_all_info(self):
print("-------------------------------------------------------------------------------------")
print(self, "number : {}/{} architect {}".format(self.number, Web.counter, self.layers))
for i in range(len(self.neurons)):
print()
for j in range(len(self.neurons[i])):
print("Neuron L{},P{}, {}".format(i, j, self.neurons[i][j].get_info()))
def print_neuro_chain(self):
print("number : {}/{} architect {} ".format(self.number, Web.counter, self.layers), end=" ")
for i in range(len(self.neurons)):
for j in range(len(self.neurons[i])):
print(self.neurons[i][j].get_all_weigh(), end=" ")
print(end=" | ")
print()
def get_neuro_chain(self):
text = ""#"number : {}/{} architect {} ".format(self.number, Web.counter, self.layers)
for i in range(len(self.neurons)):
if i == 0: continue
for j in range(len(self.neurons[i])):
weigh = self.neurons[i][j].get_all_weigh()
for k in range(len(weigh)):
text += " {:.2f}".format(weigh[k])
text += " |"
text += "| "
return text
def calculate_all(self, input):
self._set_input(input)
self._calculate()
return self._get_output()
def _set_input(self, input):
for i in range(len(self.neurons[0])):
self.neurons[0][i].set_result(input[i])
def _calculate(self):
for i in range(1, len(self.neurons)):
for j in range(len(self.neurons[i])):
self.neurons[i][j].calculate_result()
def _get_output(self):
return [self.neurons[-1][i].get_result() for i in range(len(self.neurons[-1]))]
def __eq__(self, o: object) -> bool:
if type(o) == Web:
if len(self.layers) != len(o.layers):
return False
for i in range(len(self.layers)):
if self.layers[i] != o.layers[i]:
return False
for i in range(len(self.neurons)):
for j in range(len(self.neurons[i])):
if self.neurons[i][j] != o.neurons[i][j]:
return False
return True
def randomize(self, randomize_power=0.01):
for i in range(len(self.neurons)):
for j in range(len(self.neurons[i])):
self.neurons[i][j].randomize(randomize_power)
def make_mutation(self, randomize_probability = 0.01, randomize_power = 1.):
for i in range(len(self.neurons)):
for j in range(len(self.neurons[i])):
self.neurons[i][j].randomize(randomize_power=randomize_power, randomize_probability=randomize_probability)
def new_randomize_deep_copy(self, randomize_power = 0.1):
return Web(self.layers, web=self, randomize_power=randomize_power)
def set_weigh_from_else_web(self, web):
if type(web) == Web:
if len(self.layers) != len(web.layers):
return
for i in range(len(self.layers)):
if self.layers[i] != web.layers[i]:
return
for i in range(len(self.neurons)):
for j in range(len(self.neurons[i])):
self.neurons[i][j].set_weigh(web.neurons[i][j].get_weigh())
self.neurons[i][j].set_bias(web.neurons[i][j].get_bias())
def set_weigh(self, weigh=[[[1,1],[2,2]],[[3,3],[4,4]]], bias = [[1,2],[3,4]]):
if type(weigh) == list:
if len(self.layers) != len(weigh):
return
for i in range(len(self.layers)):
if self.layers[i] != len(weigh[i]):
print(i)
return
for i in range(len(self.neurons)):
for j in range(len(self.neurons[i])):
self.neurons[i][j].set_weigh(weigh[i][j])
self.neurons[i][j].set_bias(bias[i][j])
def cross_crossover_several(self, neuro_1, point_number: int = 2, return_couple=False):
if point_number < 1: point_number = 1
new_neuro_1 = Web(web=self)
new_neuro_2 = Web(web=neuro_1)
        total = 0
        for i in range(len(new_neuro_1.neurons)):
            for j in range(len(new_neuro_1.neurons[i])):
                total += new_neuro_1.neurons[i][j].get_count_of_axon()
        if point_number >= total: point_number = total
        possible_points = [i for i in range(total)]
points = []
for i in range(point_number):
number = random.choice(possible_points)
points.append(number)
possible_points.remove(number)
points.sort()
start_parent = random.choice([False, True])
counter = 0
for i in range(len(new_neuro_1.neurons)):
for j in range(len(new_neuro_1.neurons[i])):
neuro_part_1 = new_neuro_1.neurons[i][j].get_all_weigh()
neuro_part_2 = new_neuro_2.neurons[i][j].get_all_weigh()
for k in range(len(neuro_part_1)):
if len(points) > 0 and counter > points[0]:
start_parent = not start_parent
points.pop(0)
if start_parent:
neuro_part_1[k], neuro_part_2[k] = neuro_part_2[k], neuro_part_1[k]
counter += 1
new_neuro_1.neurons[i][j].set_all_weigh(neuro_part_1)
new_neuro_2.neurons[i][j].set_all_weigh(neuro_part_2)
return [new_neuro_1, new_neuro_2] if return_couple else [new_neuro_1]
def cross_randomize(self, neuro_1):
new_neuro = Web(web=self)
for i in range(len(new_neuro.neurons)):
for j in range(len(new_neuro.neurons[i])):
if random.random() < 0.5:
new_neuro.neurons[i][j].set_all_weigh(neuro_1.neurons[i][j].get_all_weigh())
else:
new_neuro.neurons[i][j].set_all_weigh(self.neurons[i][j].get_all_weigh())
return new_neuro
def axon_line(self, number): pass # fixme add for save data to file
|
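cross_crossover_several above implements k-point crossover over the flattened weight sequence of two networks. The same idea on plain lists, as a textbook sketch rather than a line-for-line port:

import random

def k_point_crossover(a, b, k=2):
    # choose k distinct cut points over the flattened genome
    points = sorted(random.sample(range(1, len(a)), k))
    child1, child2 = [], []
    take_from_a = True
    prev = 0
    for cut in points + [len(a)]:
        src1, src2 = (a, b) if take_from_a else (b, a)
        child1 += src1[prev:cut]
        child2 += src2[prev:cut]
        take_from_a = not take_from_a
        prev = cut
    return child1, child2

print(k_point_crossover([0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1], k=2))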
NoOneZero/Neuro | common/neuron.py | <gh_stars>0
import random
import math
def sigmoida(x): return 1 / (1 + math.e**(-x))
def tanh(x): return math.tanh(x)
def tanh_div2(x): return math.tanh(x/2)
def tanh_div4(x): return math.tanh(x/4)
class Neuron:
counter = 0
def __init__(self, fathers: list =[], bias_weigh: int=0, activation_funk=tanh, randomize_power=None):
if randomize_power == None:
self.fathers_relation = [Relation(fathers[i], weigh=0) for i in range(len(fathers))]
else:
self.fathers_relation = [Relation(fathers[i], weigh=random.uniform(-abs(randomize_power), abs(randomize_power)))
for i in range(len(fathers))]
self.bias_weigh = bias_weigh if randomize_power == None else (bias_weigh + random.uniform(-abs(randomize_power), abs(randomize_power)))
self.result = 0
self.activation_funk = activation_funk
self.number = Neuron.counter
Neuron.counter = Neuron.counter + 1
def copy(self):
neuron = Neuron(fathers=[], bias_weigh=self.bias_weigh, activation_funk=self.activation_funk)
for i in range(len(self.fathers_relation)):
neuron.fathers_relation.append(self.fathers_relation[i].copy())
return neuron
def copy_no_weigh(self):
fathers = [self.fathers_relation[i].get_direction() for i in range(len(self.fathers_relation))]
neuron = Neuron(fathers=fathers, bias_weigh=self.bias_weigh, activation_funk=self.activation_funk)
return neuron
def copy_random_weigh(self, randomize_power=1, randomize_probability=1):
neuron = Neuron(fathers=[], bias_weigh=self.bias_weigh, activation_funk=self.activation_funk)
for i in range(len(self.fathers_relation)):
neuron.fathers_relation.append(self.fathers_relation[i].copy())
neuron.randomize(randomize_power=randomize_power, randomize_probability=randomize_probability)
return neuron
def randomize(self, randomize_power=1, randomize_probability=1):
        if randomize_power < 0:
            # take the absolute value (the original "-=" zeroed the power out)
            randomize_power = -randomize_power
for i in range(len(self.fathers_relation)):
if randomize_probability > random.random():
self.fathers_relation[i].randomize(randomize_power=randomize_power)
if randomize_probability > random.random():
self.bias_weigh = self.bias_weigh + random.uniform(-randomize_power, randomize_power)
return self
def get_info(self):
info = "Neuron N {}, fathers:{} ({}), bias: {}, RESULT = {}".format(self.number,
len(self.fathers_relation),
[(f.direction.number, f.weigh) for f in self.fathers_relation],
self.bias_weigh, self.result)
return info
def __eq__(self, o: object) -> bool:
if type(o) == Neuron:
equality_val = 0.001
for i in range(len(self.fathers_relation)):
if abs(self.fathers_relation[i].weigh - o.fathers_relation[i].weigh) > equality_val:
return False
if abs(self.bias_weigh - o.bias_weigh) > equality_val:
return False
return True
def add_father(self, fathers: list=[]):
for i in range(len(fathers)):
self.fathers_relation.append(Relation(fathers[i], 0))
def get_count_of_axon(self):
return len(self.fathers_relation) + 1
def get_result(self): return self.result
def set_result(self, value):self.result = value
def get_bias(self): return self.bias_weigh
def get_weigh(self):
weigh = []
for i in range(len(self.fathers_relation)):
weigh.append(self.fathers_relation[i].get_weigh())
return weigh
def get_all_weigh(self):
all_weigh = self.get_weigh()
all_weigh.append(self.get_bias())
return all_weigh
def set_weigh(self, weigh):
for i in range(len(self.fathers_relation)):
self.fathers_relation[i].set_weigh(weigh[i])
def set_bias(self, bias_weigh): self.bias_weigh = bias_weigh
def set_all_weigh(self, all_weigh: list):
self.set_bias(all_weigh.pop())
self.set_weigh(all_weigh)
def calculate_result(self):
suma = self.sumator()
return self.activation(suma)
def sumator(self):
suma = self.bias_weigh
for i in range(len(self.fathers_relation)):
suma += self.fathers_relation[i].get_multiply()
return suma
def activation(self, number):
self.result = self.activation_funk(number)
return self.result
class Relation:
def __init__(self, direction: Neuron, weigh=0):
self.direction = direction
self.weigh = weigh
def get_multiply(self): return self.weigh * self.direction.get_result()
def get_direction(self): return self.direction
def get_weigh(self): return self.weigh
def set_direction(self, direction): self.direction = direction
def set_weigh(self, weigh): self.weigh = weigh
def copy(self): return Relation(self.direction, self.weigh)
def copy_no_weigh(self, weigh=0): return Relation(self.direction, weigh=weigh)
def randomize(self, randomize_power): self.weigh = self.weigh + random.uniform(-randomize_power, randomize_power)
|
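Each neuron computes activation(bias + sum of weight * father_result), with tanh as the default activation. A quick by-hand check of that forward step (weights invented):

import math

inputs = [0.5, -1.0]
weights = [0.8, 0.3]
bias = 0.1
s = bias + sum(w * x for w, x in zip(weights, inputs))
print(math.tanh(s))   # tanh(0.1 + 0.4 - 0.3) = tanh(0.2) ~ 0.197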
NoOneZero/Neuro | common/excel.py | from openpyxl import load_workbook
from openpyxl import Workbook
class ExcelManager:
def __init__(self, filename = "excel_output/0.xlsx", letter_name= "Sheet5"):
self.filename = filename
self.letter_name=letter_name
self.open_book()
self.open_letter()
    def open_book(self):
        try:
            self.wb = load_workbook(filename=self.filename, data_only=True)
        except FileNotFoundError:
            self.wb = Workbook()
            self.wb.save(filename=self.filename)
            self.wb = load_workbook(filename=self.filename, data_only=True)
    def open_letter(self):
        try:
            self.letter = self.wb[self.letter_name]
        except KeyError:
            self.wb.create_sheet(self.letter_name)
            self.letter = self.wb[self.letter_name]
            self.save_data()
def read(self):
        '''Read the whole data box: a rectangle sized by the largest row and column that contain data'''
i = 0
data = []
for row in self.letter.iter_rows(min_row=1):
i += 1
data.append([])
for j in range(len(row)):
data[-1].append(row[j].value)
return data
def read_rect(self, min_row=1, min_col=1, max_col=3, max_row=6):
        '''Read a rectangular box of cells and return their values'''
data = []
for row in self.letter.iter_rows(min_row = min_row, min_col = min_col, max_col = max_col, max_row = max_row):
data.append([])
for cell in row:
data[-1].append(cell.value)
return data
def read_one_cell(self, row = 1, column = 1):
        '''Read a single cell'''
d = self.letter.cell(row=row, column=column)
return d.value
def write_one_cell(self, row = 1, column = 1, value=None):
        '''Write a single cell'''
d = self.letter.cell(row = row, column = column, value = value)
self.save_data()
return d
def write_append(self, weights = [45, 105, 100, 0, 50, 70]):
self.letter.append(weights)
self.save_data()
def write_rect(self, start_row = 1, start_column = 1, data = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17]]):
for i, row in zip(range(len(data)), self.letter.iter_rows(
min_row = start_row,
min_col = start_column,
max_col = start_column - 1 + max([len(r) for r in data]),
max_row = start_row - 1 + len(data))):
for j, cell in zip(range(len(data[i])), row):
cell.value = data[i][j]
self.save_data()
return data
def save_data(self):
self.wb.save(self.filename)
self.open_book()
|
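A usage sketch of ExcelManager; the file name, sheet name, and values are illustrative:

mgr = ExcelManager(filename='excel_output/run1.xlsx', letter_name='fitness')
mgr.write_append([0, 0.31])                        # e.g. epoch number, mean fitness
mgr.write_one_cell(row=1, column=3, value='best')
print(mgr.read_one_cell(row=1, column=1))          # -> 0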
NoOneZero/Neuro | test_1_lineral/characterTest.py | <filename>test_1_lineral/characterTest.py
from common.web import Web
from common.character import Character
from test_1_lineral.personTest import PersonTest
from test_1_lineral.geneticAlgorithmParamsTest import GeneticAlgorithmParamsTest
import math
class CharacterTest(Character):
def __init__(self, person=None, web=None, default_position_range=None) -> None:
super(CharacterTest, self).__init__(person, web, default_position_range)
def calculate(self, start_pos, end_pos, goal_relative):
distance = []
for i in range(self.web.layers[0]):
distance.append(end_pos[i // 2][i % 2] - start_pos[i // 2][i % 2])
input_data = []
for i in range(self.web.layers[0]):
input_data.append((self.person.position[i] - start_pos[i // 2][i % 2]) / distance[i])
output = self.web.calculate_all(input_data)
input_to_output = self.web.layers[-1]//self.web.layers[0]
for i in range(self.web.layers[0]):
self.person.speed[i] = output[i * input_to_output] #1 if output[i * input_to_output] > 0.05 else (-1 if output[i * input_to_output]<-0.05 else 0)
for j in range(1, input_to_output):
self.person.speed[i] += output[i * input_to_output+j] * 10**j
def calculate_fitness(self, goal_absolute, frame, geneticAlgorithmParamsTest:GeneticAlgorithmParamsTest):
if geneticAlgorithmParamsTest.get_fitness_way_distance() == 1:
sum_of_squers = 0
for i in range(geneticAlgorithmParamsTest.get_dimension()):
sum_of_squers += (self.person.position[i] - goal_absolute[i // 2][i % 2])**2
self.fitness += 1 / (1 + math.sqrt(sum_of_squers))
if geneticAlgorithmParamsTest.get_fitness_way_side() == 1:
for i in range(geneticAlgorithmParamsTest.get_dimension()):
self.fitness += 1 if self.person.speed[i] * (goal_absolute[i // 2][i % 2] - self.person.position[i]) > 0 else -1
if geneticAlgorithmParamsTest.get_fitness_way_vector() == 1:
if geneticAlgorithmParamsTest.get_dimension() == 1:
self.fitness += 1 if self.person.speed[0] * (goal_absolute[0][0] - self.person.position[0]) > 0 else -1
else:
for i in range(geneticAlgorithmParamsTest.get_dimension()//2):
self.fitness += CharacterTest.find_cos_angle(central_point=[self.person.position[2*i], self.person.position[2*i+1]],
speed_vector=[self.person.speed[2*i], self.person.speed[2*i+1]],
goal_vector=[goal_absolute[i][0], goal_absolute[i][1]])
if geneticAlgorithmParamsTest.get_fitness_way_kill_all_unwanted() == 1:
for i in range(geneticAlgorithmParamsTest.get_dimension()):
if self.person.position[i] > frame[1][i//2][i%2] or self.person.position[i] < frame[0][i//2][i%2]:
self.fitness = 0
@staticmethod
def find_cos_angle(central_point, speed_vector, goal_vector):
return CharacterTest.cos_angle(vector1=speed_vector,
vector2=[goal_vector[0] - central_point[0], goal_vector[1] - central_point[1]])
@staticmethod
def cos_angle(vector1, vector2):
numerator = vector1[0] * vector2[0] + vector1[1] * vector2[1]
denominator = (vector1[0] ** 2 + vector1[1] ** 2) ** 0.5 * (vector2[0] ** 2 + vector2[1] ** 2) ** 0.5
return 0 if denominator == 0 else numerator / denominator
|
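cos_angle above is the standard cosine of the angle between two 2-D vectors, with a guard for zero-length vectors. A quick sanity check:

print(CharacterTest.cos_angle([1, 0], [0, 1]))   # orthogonal -> 0.0
print(CharacterTest.cos_angle([1, 0], [2, 0]))   # parallel   -> 1.0
print(CharacterTest.cos_angle([1, 0], [0, 0]))   # zero vector guarded -> 0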
NoOneZero/Neuro | test_1_lineral/personTest.py | from common.person import Person
from random import randint
class PersonTest(Person):
    def __init__(self, position: list = None, color: list = None, size: int = 20, default_position_range=None) -> None:
self.color2 = [randint(2, 50), randint(100, 200), randint(10,50)]
self.color3 = [randint(2, 50), randint(10, 20), randint(100,200)]
        super().__init__(position or [0, 0], color or [255, 255, 255], size, default_position_range)
def draw(self, display):
if len(self.position) == 1:
self.pygame.draw.circle(display, self.color, [int(self.position[0]), 500], int(self.size))
elif len(self.position) >= 2:
self.pygame.draw.circle(display, self.color, [int(self.position[0]), int(self.position[1])], int(self.size))
if len(self.position) >= 4:
self.pygame.draw.circle(display, self.color2, [int(self.position[2]), int(self.position[3])], int(self.size))
if len(self.position) >= 6:
self.pygame.draw.circle(display, self.color3, [int(self.position[4]), int(self.position[5])], int(self.size))
|