| python_code | repo_name | file_path |
|---|---|---|
"""Match items in a dictionary using fuzzy matching
Implemented for pywinauto.
This class uses difflib to match strings.
This class uses a linear search to find the items as it HAS to iterate over
every item in the dictionary (otherwise it would not be possible to know which
is the 'best' match).
If the exact item i... | dd-genomics-master | archived/fuzzy_string_dict.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id', 'int')
ddext... | dd-genomics-master | archived/v1/code/pheno_mentions.py |
import ddext
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.input('dep_paths', 'text[]')
ddext.input('dep_parents', 'int[]')
ddext.input('wordidxs... | dd-genomics-master | archived/v1/code/pair_features.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id_1', 'int')
ddext.input('mention_id_1', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('words_1', 'text[]')
ddext.input('entity_1', 'text')
ddext.input('type_1', 'text')
ddext.input('correct_1', 'bo... | dd-genomics-master | archived/v1/code/gene_pheno_pairs.py |
#! /usr/bin/env python3
#
# This script takes approved symbols, alternate symbols, and approved long names
# from the three dictionaries of genes we currently have, and tries to obtain a
# single dictionary that contains the union of the information available.
#
# The output is a TSV file where the first column is the... | dd-genomics-master | archived/v0/dicts/merge_gene_dicts.py |
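The script body is cut off; a hedged sketch of the union-merge its header describes, with invented dictionary contents and an assumed symbol-to-names layout (the real TSV schema is not shown):

```python
# Hedged sketch: union-merge several symbol->names mappings and emit TSV.
# File formats and column order are assumptions for illustration only.
from collections import defaultdict

def merge_gene_dicts(dicts):
    """Union the alternate names found for each approved symbol."""
    merged = defaultdict(set)
    for d in dicts:
        for symbol, names in d.items():
            merged[symbol].update(names)
    return merged

d1 = {"BRCA1": {"breast cancer 1"}}
d2 = {"BRCA1": {"RNF53"}, "TP53": {"tumor protein p53"}}
for symbol, names in sorted(merge_gene_dicts([d1, d2]).items()):
    print("{}\t{}".format(symbol, "|".join(sorted(names))))
```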
#! /usr/bin/env python3
#
# Look for acronyms defined in a document that look like gene symbols
import fileinput
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, list2TSVarray, no_op, \
TSVstring2list
# Return acronyms from sente... | dd-genomics-master | archived/v0/code/ext_gene_find_acronyms.py |
#! /usr/bin/env python3
#
# Extract, add features to, and supervise mentions extracted from geneRifs.
#
import fileinput
from dstruct.Sentence import Sentence
from extract_gene_mentions import extract
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op
from helper.dictionaries import load_dict
... | dd-genomics-master | archived/v0/code/ext_geneRifs_candidates.py |
#! /usr/bin/env python3
#
# Map phenotype abnormalities entities to mentions
import sys
from nltk.stem.snowball import SnowballStemmer
from helper.dictionaries import load_dict
ORDINALS = frozenset(
["1st", "2nd", "3rd", "4th" "5th", "6th" "7th", "8th", "9th", "first",
"second", "third", "fourth", "fifth... | dd-genomics-master | archived/v0/code/hpoterms2mentions.py |
#! /usr/bin/env python3
#
# Extract gene mention candidates and perform distant supervision
#
import fileinput
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_all_phrases_in_sentence, \
get_dict_from_TSVline, TSV... | dd-genomics-master | archived/v0/code/ext_gene_candidates.py |
dd-genomics-master | archived/v0/code/__init__.py | |
#! /usr/bin/env python3
#
# Takes one directory containing parser output files and, for each file in that
# directory, emits TSV lines that can be loaded in the 'sentences' table
# using the PostgreSQL COPY FROM command.
#
# Parser output files contain "blocks" which are separated by blank lines. Each
# "block" is ... | dd-genomics-master | archived/v0/code/parser2sentences.py |
#! /usr/bin/env python3
#
# Takes as first and only argument a dump obtained using get_dump.sql and
# remove the entries where the gene symbol can be used to express multiple
# genes.
import sys
if len(sys.argv) != 2:
sys.stderr.write("USAGE: {} dump.tsv\n".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv... | dd-genomics-master | archived/v0/code/filter_out_uncertain_genes.py |
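The filtering logic itself is truncated; a hedged sketch of the idea the header states, dropping symbols associated with more than one gene, with assumed column positions:

```python
# Hedged sketch: drop rows whose gene symbol maps to more than one gene
# id. The two-pass structure and the column positions are assumptions;
# the real dump schema is not shown above.
from collections import defaultdict

def filter_unambiguous(rows, symbol_col=0, gene_id_col=1):
    ids_per_symbol = defaultdict(set)
    for row in rows:                        # pass 1: gene ids per symbol
        ids_per_symbol[row[symbol_col]].add(row[gene_id_col])
    return [r for r in rows                 # pass 2: keep unambiguous rows
            if len(ids_per_symbol[r[symbol_col]]) == 1]

rows = [["ABC", "gene1"], ["ABC", "gene2"], ["XYZ", "gene3"]]
print(filter_unambiguous(rows))  # [['XYZ', 'gene3']]
```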
#! /usr/bin/env python3
#
# Extract gene mention candidates and perform distant supervision
#
import fileinput
import re
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op, \
print_feature, BASE_DIR
import ddl... | dd-genomics-master | archived/v0/code/ext_gene_features.py |
#! /usr/bin/env python3
#
# Canonicalize a dump using the HPO dag
#
# Use the output of filter_out_uncertain_genes.py
import sys
from helper.dictionaries import load_dict
if len(sys.argv) != 2:
sys.stderr.write("USAGE: {} dump.tsv\n".format(sys.argv[0]))
sys.exit(1)
hpoancestors = load_dict("hpoancestors")... | dd-genomics-master | archived/v0/code/canonicalize.py |
#! /usr/bin/env python3
from helper.dictionaries import load_dict
if __name__ == "__main__":
merged_genes_dict = load_dict("merged_genes")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_orig = load_dict("hpoterms_orig")
for hpoterm_name in hpoterms_orig:
for long_name in inve... | dd-genomics-master | archived/v0/code/find_hpoterms_in_genes.py |
#! /usr/bin/env python3
import fileinput
import random
import re
from nltk.stem.snowball import SnowballStemmer
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from helper.easierlife import get_all_phrases_in_sentence, \
get_dict_from_TSVline, TSVstring2list, no_op
from helper.dictionar... | dd-genomics-master | archived/v0/code/ext_pheno_candidates.py |
#! /usr/bin/env python3
#
# Convert geneRifs file to a file that can be given in input to the NLPparser
# extractor.
import fileinput
import json
import sys
if len(sys.argv) < 2:
sys.stderr.write("USAGE: {} FILE [FILE [FILE [...]]]\n".format(sys.argv[0]))
sys.exit(1)
DOCUMENT_ID = "geneRifs-"
i = 0
with file... | dd-genomics-master | archived/v0/code/geneRifs2NLPparser.py |
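Only the prologue is visible; a sketch of the conversion pattern it sets up, one JSON document per input line with a synthetic "geneRifs-<i>" id (the field names the NLP parser expects are assumptions):

```python
# Sketch of the visible pattern: synthetic doc ids built from the
# DOCUMENT_ID prefix and a counter, one JSON object per input line.
# The "id"/"text" keys are assumptions, not the parser's real schema.
import json

DOCUMENT_ID = "geneRifs-"
lines = ["BRCA1 regulates DNA repair.", "TP53 is a tumor suppressor."]
for i, text in enumerate(lines):
    print(json.dumps({"id": DOCUMENT_ID + str(i), "text": text}))
```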
#! /usr/bin/env python3
import fileinput
import re
from dstruct.Sentence import Sentence
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op, \
print_feature
import ddlib
def add_features_generic(relation_id, gene_words, pheno_words, sentence):
# Use the generic feature library (ONLY... | dd-genomics-master | archived/v0/code/ext_genepheno_features.py |
#! /usr/bin/env python3
import fileinput
import random
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2list
# Load the gene<->hpote... | dd-genomics-master | archived/v0/code/ext_genepheno_candidates.py |
#! /usr/bin/env python3
import fileinput
import re
from dstruct.Sentence import Sentence
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op, \
print_feature, BASE_DIR
import ddlib
def add_features_generic(mention_id, pheno_words, sentence):
# Use the generic feature library (ONLY!)
... | dd-genomics-master | archived/v0/code/ext_pheno_features.py |
#! /usr/bin/env python3
from helper.dictionaries import load_dict
if __name__ == "__main__":
merged_genes_dict = load_dict("merged_genes")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_orig = load_dict("hpoterms_orig")
for long_name in inverted_long_names:
for hpoterm_name i... | dd-genomics-master | archived/v0/code/find_genes_in_hpoterms.py |
#! /usr/bin/env python3
#
# Perform comparison between existing HPO mapping and dump from DeepDive
#
# Take the output from canonicalize.py
import sys
if len(sys.argv) != 3:
sys.stderr.write("USAGE: {} hpo dump\n".format(sys.argv[0]))
sys.exit(1)
hpo_genes = set()
hpo_ids = set()
hpo_mappings = set()
with o... | dd-genomics-master | archived/v0/code/compare_dump_to_hpo.py |
#! /usr/bin/env python3
#
# Take the json output of the NLPextractor extractor and convert it to TSV that
# we can feed to the database using COPY FROM. The schema of the table is equal
# to the 'sentences' table except for an additional column at the end which is
# the gene that we know the geneRif contains.
import f... | dd-genomics-master | archived/v0/code/parser2geneRifs.py |
#! /usr/bin/env python3
import fileinput
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2bool, \
TSVstring2list
# Add features... | dd-genomics-master | archived/v0/code/gene_gene_relations.py |
#! /usr/bin/env python3
""" An object representing a relation
"""
import json
from helper.easierlife import list2TSVarray
class Relation(object):
doc_id = None
sent_id_1 = None
sent_id_2 = None
type = None
mention_1_id = None
mention_2_id = None
mention_1_words = None
mention_2_word... | dd-genomics-master | archived/v0/code/dstruct/Relation.py |
dd-genomics-master | archived/v0/code/dstruct/__init__.py | |
#! /usr/bin/env python3
""" A Sentence class
Basically a container for an array of Word objects, plus doc_id and sent_id.
Originally obtained from the 'pharm' repository, but modified.
"""
from dstruct.Word import Word
class Sentence(object):
# to avoid bad parse tree that have self-recursion
_MAX_DEP_PATH... | dd-genomics-master | archived/v0/code/dstruct/Sentence.py |
#! /usr/bin/env python3
""" A generic Mention class
Originally obtained from the 'pharm' repository, but modified.
"""
import json
from helper.easierlife import list2TSVarray
class Mention(object):
doc_id = None
sent_id = None
wordidxs = None
type = None
entity = None
words = None
is_c... | dd-genomics-master | archived/v0/code/dstruct/Mention.py |
#! /usr/bin/env python3
""" A Word class
Originally obtained from the 'pharm' repository, but modified.
"""
class Word(object):
doc_id = None
sent_id = None
in_sent_idx = None
word = None
pos = None
ner = None
lemma = None
dep_path = None
dep_parent = None
sent_id = None
... | dd-genomics-master | archived/v0/code/dstruct/Word.py |
dd-genomics-master | archived/v0/code/helper/__init__.py | |
#! /usr/bin/env python3
""" Helper functions to make our life easier.
Originally obtained from the 'pharm' repository, but modified.
"""
import fileinput
import json
import os.path
import sys
from dstruct.Sentence import Sentence
# BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.pa... | dd-genomics-master | archived/v0/code/helper/easierlife.py |
#! /usr/bin/env python3
from helper.easierlife import BASE_DIR
# Load an example dictionary
# 1st column is doc id, 2nd is sentence ids (separated by '|'), 3rd is entity
def load_examples_dictionary(filename):
examples = dict()
with open(filename, 'rt') as examples_dict_file:
for line in examples_dic... | dd-genomics-master | archived/v0/code/helper/dictionaries.py |
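The loader above is cut off mid-function; given the column layout spelled out in its comment (doc id, '|'-separated sentence ids, entity), a hedged completion in which the value shape stored per doc id is an assumption:

```python
# Hedged completion of the documented layout:
#   doc_id <TAB> sent_ids ('|'-separated) <TAB> entity
# The per-doc value shape (list of (sent_id_set, entity)) is assumed.
def load_examples_dictionary(filename):
    examples = dict()
    with open(filename, 'rt') as examples_dict_file:
        for line in examples_dict_file:
            doc_id, sent_ids, entity = line.rstrip("\n").split("\t")
            examples.setdefault(doc_id, []).append(
                (set(sent_ids.split("|")), entity))
    return examples
```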
#!/usr/bin/env python
# A script for seeing basic statistics about the number and type of gene mentions extracted
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
import csv
import re
# Kinds of statistics tracked automatically by postgres "ANALYZE" command
# https://github.com/postgres/p... | dd-genomics-master | archived/analysis/util/dd_analysis_utils.py |
#!/usr/bin/env python
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
import os
import csv
from collections import defaultdict
# there is 1 group_by col + 1 total_count + 1 labeled_true + 1 labeled_false + 10 bucket_n
N_COLS = 14
if __name__ == '__main__':
if len(sys.argv) < 4:
pr... | dd-genomics-master | archived/analysis/analyses/mentions-by-entity/process.py |
#!/usr/bin/env python
# A script for seeing basic statistics about the number and type of gene mentions extracted
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
from dd_analysis_utils import process_pg_statistics
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Process.py: ... | dd-genomics-master | archived/analysis/analyses/postgres-stats/process.py |
#!/usr/bin/env python
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
import os
import csv
from collections import defaultdict
# there is 1 group_by col + 1 total_count + 1 labeled_true + 1 labeled_false + 10 bucket_n
N_COLS = 14
if __name__ == '__main__':
if len(sys.argv) < 4:
pr... | dd-genomics-master | archived/analysis/analyses/docs-by-entity/process.py |
#!/bin/python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
#
# This is the, uh, setup for librarian
#
setup(name="librarian",
version="0.0.dev1",
description="The Librarian maintains important datasets",
author="Jaeho, Abhinav, Mike",
author_emai... | librarian-master | setup.py |
import os
def list_files(directory):
if os.path.isfile(directory):
yield directory
        return  # PEP 479: raising StopIteration inside a generator errors on Python 3.7+
for f in os.listdir(directory):
name = directory + '/' + f
if os.path.isfile(name):
yield name
elif os.path.isdir(name):
for f in list_files(nam... | librarian-master | librarian/listfiles.py |
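As a usage note for the generator above, a sketch of the equivalent traversal with the stdlib's os.walk, which avoids the manual recursion:

```python
# Equivalent traversal via os.walk; same yield-per-file behavior as the
# recursive generator above, without explicit recursion.
import os

def list_files_walk(directory):
    if os.path.isfile(directory):
        yield directory
        return
    for root, _dirs, files in os.walk(directory):
        for name in files:
            yield os.path.join(root, name)

for path in list_files_walk("."):
    print(path)
    break  # just show the first file found
```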
######################################################################################
# #
# A temporary implementation which uses google sheets as the database for librarian #
# ... | librarian-master | librarian/database.py |
#!/usr/bin/env python
"""Librarian Client Version 0.01
Librarian takes care of all files that leave/enter engagements. When
a partner provides a new datafile (as with Memex ads), they get added
to Librarian. When we ship extracted data elsewhere, they get added
to Librarian.
It can also be used to track standard ut... | librarian-master | librarian/librarian.py |
librarian-master | librarian/__init__.py | |
#!/bin/python
"""Database connectivity for Librarian.
This module contains all the classes and miscellany necessary for
Librarian to connect to a shared backend RDBMS for metadata. It
is not designed to hold raw content, just the file names, version
history, checksums, etc.
Schema:
'Engagements': ['id', 'name', 'dat... | librarian-master | librarian/dbconn.py |
#!/usr/bin/env python
# upload-s3.py -- Librarian script that takes care of uploading data to AWS S3
import boto
import boto.s3.connection
import os
import datetime
def list_files(directory):
''' Generator to recursively list all the files in a directory. '''
if os.path.isfile(directory):
yield direct... | librarian-master | librarian/storage_s3.py |
librarian-master | tests/__init__.py | |
from nose.tools import *
import librarian
def setup():
print "SETUP!"
def teardown():
print "TEAR DOWN!"
def test_basic():
print "I RAN!"
| librarian-master | tests/librarian_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import sys
import numpy as np
from utils import (
initialize_render, merge_meshes,
load_motion
)
import torch
from PIL import Image
from model import JOHMRLite
import os
import glob
import json
from pathlib import Path
import argparse
import re
impo... | d3d-hoi-main | visualization/visualize_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
from pytorch3d.structures import Meshes
from utils import rotation_matrix
from pytorch3d.io import save_obj
from pytorch3d.transforms imp... | d3d-hoi-main | visualization/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongSh... | d3d-hoi-main | visualization/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
from utils import (
initialize_render, merge_meshes,
load_motion
)
import torch
from PIL import Image
from natsort import natsorted
from model import JOHMRLite
import os
import... | d3d-hoi-main | visualization/annotation/qt.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import pdb
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
import math
from pytorch3d.structures import Meshes
import cv2
import matplotlib.pyplot as plt
from utils import rotation_mat... | d3d-hoi-main | visualization/annotation/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongS... | d3d-hoi-main | visualization/annotation/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
from model import JOHMRModel
from utils import (
initialize_render, merge_meshes,
load_motion,
save_meshes, save_parameters
)
import json
import tqdm
from matplotlib.image import imsave
import matplotlib.pyplot as plt
import cv2
impor... | d3d-hoi-main | optimization/optimize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
import math
from pytorch3d.structures import Meshes
import cv2
import matplotlib.pyplot as plt
from utils import rotation_matrix_batch
fr... | d3d-hoi-main | optimization/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongSh... | d3d-hoi-main | optimization/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from pytorch3d.transforms import (
so3_relative_angle,
euler_angles_to_matrix
)
from scipy.spatial.distance import cdist
import json
from utils import (
load_motion,
)
impo... | d3d-hoi-main | optimization/evaluate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from skimage import io
from torch.utils.data import Dataset
import json
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from PIL import Image
import cv2
from natsort import natsorted
from utils impor... | d3d-hoi-main | optimization/dataloader.py |
import os
import argparse
import ntpath
import common
import pdb
import open3d as o3d
import numpy as np
class Simplification:
"""
Perform simplification of watertight meshes.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options ... | d3d-hoi-main | preprocess/3_simplify.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
import pdb
import glob
import natsort
from torch.au... | d3d-hoi-main | preprocess/visualize_data.py |
import math
import numpy as np
import os
from scipy import ndimage
import common
import argparse
import ntpath
# Import shipped libraries.
import librender
import libmcubes
use_gpu = True
if use_gpu:
import libfusiongpu as libfusion
from libfusiongpu import tsdf_gpu as compute_tsdf
else:
import libfusionc... | d3d-hoi-main | preprocess/2_fusion.py |
import os
import subprocess
from tqdm import tqdm
from multiprocessing import Pool
def convert(obj_path):
try:
load_folder = os.path.join(obj_path, 'parts_ply')
save_folder = os.path.join(obj_path, 'parts_off')
part_paths = [f.path for f in os.scandir(load_folder)]
if not os.path.... | d3d-hoi-main | preprocess/convert_off.py |
import os  # needed below for os.path.join
import pdb
import subprocess
import scandir
from multiprocessing import Pool
import json
import common
def remesh(obj_path):
in_dir = os.path.join(obj_path, 'parts_off/')
scaled_dir = os.path.join(obj_path, 'parts_scaled_off/')
depth_dir = os.path.join(obj_path, 'parts_depth_off/')
fused_dir = os.path... | d3d-hoi-main | preprocess/re-meshing.py |
"""
Some I/O utilities.
"""
import os
import time
import h5py
import math
import numpy as np
def write_hdf5(file, tensor, key = 'tensor'):
"""
    Write a simple tensor, i.e. a numpy array, to HDF5.
:param file: path to file to write
:type file: str
:param tensor: tensor to write
:type tensor: nump... | d3d-hoi-main | preprocess/common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from tqdm import tqdm
import re
import open3d as o... | d3d-hoi-main | preprocess/process_data.py |
import os
import common
import argparse
import numpy as np
import json
class Scale:
"""
Scales a bunch of meshes.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
def get_parser(self):
"""
... | d3d-hoi-main | preprocess/1_scale.py |
import re
# Defining labels
ABSTAIN = 0
ABNORMAL = 1
NORMAL = 2
def LF_report_is_short(report):
"""
Checks if report is short.
"""
return NORMAL if len(report) < 280 else ABSTAIN
negative_inflection_words = ["but", "however", "otherwise"]
def LF_negative_inflection_words_in_report(report):
return ... | cross-modal-ws-demo-master | openi_demo/labeling_functions.py |
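A sketch of how labeling functions like these are typically applied: run every LF over every report to build a label matrix. Aggregation (e.g. Snorkel's label model) is out of scope here, and the second LF is a toy stand-in:

```python
# Apply each labeling function to each report; the resulting matrix is
# what a label model (or majority vote) would aggregate downstream.
ABSTAIN, ABNORMAL, NORMAL = 0, 1, 2

def LF_report_is_short(report):
    return NORMAL if len(report) < 280 else ABSTAIN

def LF_mentions_fracture(report):
    # Toy LF for illustration, not from the original file.
    return ABNORMAL if "fracture" in report.lower() else ABSTAIN

lfs = [LF_report_is_short, LF_mentions_fracture]
reports = ["No acute abnormality.", "Evidence of rib fracture. " * 20]
label_matrix = [[lf(r) for lf in lfs] for r in reports]
print(label_matrix)  # [[NORMAL, ABSTAIN], [ABSTAIN, ABNORMAL]]
```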
import os
import numpy as np
import torch
import torchvision.transforms as transforms
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
def load_ids(filename):
fin = open(filename, "r")
return [_.strip() for _ in fin]
class StdNormalize(object):
"""
Normalize torch ... | cross-modal-ws-demo-master | openi_demo/utils.py |
import re
import spacy
spacy_en = spacy.load('en_core_web_sm')
# Setting LF output values
ABSTAIN_VAL = 0
SEIZURE_VAL = 1
NO_SEIZURE_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
####################################... | cross-modal-ws-demo-master | lfs/lfs_eeg.py |
import re
# Setting LF output values
ABSTAIN_VAL = 0
ABNORMAL_VAL = 1
NORMAL_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
############################################################################################... | cross-modal-ws-demo-master | lfs/lfs_msk.py |
import re
# Setting LF output values
ABSTAIN_VAL = 0
ABNORMAL_VAL = 1
NORMAL_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
############################################################################################... | cross-modal-ws-demo-master | lfs/lfs_cxr.py |
import re
# Setting LF output values
ABSTAIN_VAL = 0
HEMORRHAGE_VAL = 1
NO_HEMORRHAGE_VAL = -1
######################################################################################################
##### LABELING FUNCTIONS (LFs)
#########################################################################################... | cross-modal-ws-demo-master | lfs/lfs_hct.py |
import os, requests, sys, unittest
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import cPickle
from snorkel.parser import *
ROOT = os.environ['SNORKELHOME']
class TestParsers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sp = SentenceParser()
@classmethod
def tearDownC... | ddbiolib-master | test/parser_tests.py |
"""
Bioinformatics Tools for Data Programming
==================================
ddioblib is a library for creating and interaction with
onotologies to create forms for weak supervision for
machine learning systems like DeepDive and Snorkel
See http://deepdive.stanford.edu/ for more information
"""
__version__ = "0.... | ddbiolib-master | ddbiolib/__init__.py |
from .serialization import *
| ddbiolib-master | ddbiolib/parsers/__init__.py |
import os
import sys
import glob
import codecs
import cPickle
import numpy as np
#################################################
# Parser Serializers
#################################################
class SerializedParser(object):
def __init__(self,parser,encoding="utf-8"):
'''Interface for persisting ... | ddbiolib-master | ddbiolib/parsers/serialization.py |
from .umls import *
from .bioportal import *
from .ctd import *
from .specialist import * | ddbiolib-master | ddbiolib/ontologies/__init__.py |
from .base import * | ddbiolib-master | ddbiolib/ontologies/bioportal/__init__.py |
import unicodecsv
def load_bioportal_dictionary(filename, ignore_case=True):
'''BioPortal Ontologies
http://bioportal.bioontology.org/'''
reader = unicodecsv.reader(open(filename,"rb"), delimiter=',', quotechar='"', encoding='utf-8')
d = [line for line in reader]
dictionary = {}
for line ... | ddbiolib-master | ddbiolib/ontologies/bioportal/base.py |
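The BioPortal loader is truncated after building the row list; a hedged sketch of the same shape using the stdlib csv module, where the column holding the term is an assumption:

```python
# Hedged sketch of a BioPortal-style CSV term loader using stdlib csv
# (the original uses unicodecsv); which column holds the preferred term
# in a real export is an assumption here.
import csv

def load_term_dictionary(filename, ignore_case=True):
    dictionary = {}
    with open(filename, newline="", encoding="utf-8") as f:
        for line in csv.reader(f, delimiter=",", quotechar='"'):
            if not line:
                continue
            term = line[0].lower() if ignore_case else line[0]
            dictionary[term] = 1
    return dictionary
```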
from .base import * | ddbiolib-master | ddbiolib/ontologies/specialist/__init__.py |
'''
The SPECIALIST Lexicon
"The SPECIALIST lexicon is a large syntactic lexicon of biomedical and general English,
designed/developed to provide the lexical information needed for the SPECIALIST
Natural Language Processing System (NLP) which includes SemRep, MetaMap, and the
Lexical Tools. It is intended to be a ge... | ddbiolib-master | ddbiolib/ontologies/specialist/base.py |
# Comparative Toxicogenomics Database
# http://ctdbase.org/downloads/
from .base import * | ddbiolib-master | ddbiolib/ontologies/ctd/__init__.py |
import codecs
def load_ctd_dictionary(filename, ignore_case=True):
'''Comparative Toxicogenomics Database'''
d = {}
header = ['DiseaseName', 'DiseaseID', 'AltDiseaseIDs', 'Definition',
'ParentIDs', 'TreeNumbers', 'ParentTreeNumbers', 'Synonyms',
'SlimMappings']
sy... | ddbiolib-master | ddbiolib/ontologies/ctd/base.py |
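The CTD loader is cut off right after its header list; a hedged sketch of reading such a TSV into a name-to-DiseaseID dictionary, assuming '#'-prefixed comment lines and '|'-separated synonyms as in CTD downloads:

```python
# Hedged sketch built around the header listed above; the comment-line
# and synonym-separator conventions are assumptions about CTD exports.
import csv

header = ['DiseaseName', 'DiseaseID', 'AltDiseaseIDs', 'Definition',
          'ParentIDs', 'TreeNumbers', 'ParentTreeNumbers', 'Synonyms',
          'SlimMappings']

def load_ctd_dictionary(filename, ignore_case=True):
    d = {}
    with open(filename, encoding="utf-8") as f:
        for row in csv.reader(f, delimiter="\t"):
            if not row or row[0].startswith("#") or len(row) < len(header):
                continue
            rec = dict(zip(header, row))
            names = [rec['DiseaseName']] + rec['Synonyms'].split("|")
            for name in filter(None, names):
                d[name.lower() if ignore_case else name] = rec['DiseaseID']
    return d
```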
from collections import namedtuple
DatabaseConfig = namedtuple("DatabaseConfig",["host","username","dbname","password"])
DEFAULT_UMLS_CONFIG = DatabaseConfig(host="127.0.0.1",
username="umls",
dbname="2014AB",
... | ddbiolib-master | ddbiolib/ontologies/umls/config.py |
import os
import networkx as nx
from ...utils import database
class SemanticNetwork(object):
"""
The UMLS Semantic Network defines 133 semantic types and 54 relationships
found in the UMLS Metathesaurus. There are two branches: Entity and Event
https://www.ncbi.nlm.nih.gov/books/NBK9679/
"""... | ddbiolib-master | ddbiolib/ontologies/umls/semantic_network.py |
from .config import *
from .metathesaurus import *
from .semantic_network import *
from .lf_factory import *
from .dictionary import * | ddbiolib-master | ddbiolib/ontologies/umls/__init__.py |
'''
Noise-aware Dictionary
Create a snapshot of all UMLS terminology, broken down by
semantic type (STY) and source vocabulary (SAB).
Treat these as competing experts, generating labeling
functions for each
@author: jason-fries [at] stanford [dot] edu
'''
import os
import re
import bz2
import sys
import glob
import ... | ddbiolib-master | ddbiolib/ontologies/umls/lf_factory.py |
import re
import os
import networkx as nx
from ...utils import database
from .config import DEFAULT_UMLS_CONFIG
from .semantic_network import SemanticNetwork
class Metathesaurus(object):
"""
This class hides a bunch of messy SQL queries that interface with a UMLS
Metathesaurus database instance, snapshots ... | ddbiolib-master | ddbiolib/ontologies/umls/metathesaurus.py |
'''
UMLS Dictionary
TODO: all dictionaries should be persisted in Snorkel's
eventual "context" ORM interface
@author: jason-fries [at] stanford [dot] edu
'''
import os
import re
import bz2
import sys
import glob
import codecs
import itertools
from functools import partial
from collections import defaultdict
from .me... | ddbiolib-master | ddbiolib/ontologies/umls/dictionary.py |
import os
import sys
import glob
import codecs
import subprocess
from collections import namedtuple
from ..utils import download
from ..corpora import Corpus,Document,DocParser
from ..parsers import PickleSerializedParser
class NcbiDiseaseParser(DocParser):
'''
The NCBI disease corpus is fully annotated at the... | ddbiolib-master | ddbiolib/datasets/ncbi_disease.py |
from .ncbi_disease import *
from .ncbi_legacy import * | ddbiolib-master | ddbiolib/datasets/__init__.py |
import os
import sys
import glob
import codecs
import subprocess
from collections import namedtuple
from ..utils import download
from ..corpora import Corpus,Document,DocParser
from ..parsers import PickleSerializedParser
class CdrParser(DocParser):
'''
The CDR disease corpus
... | ddbiolib-master | ddbiolib/datasets/cdr.py |
'''
DEPRECATED
Include only for backwards compatibility with TACL experiments
'''
import os
import re
import sys
import glob
import codecs
import cPickle
import operator
import itertools
import numpy as np
from collections import namedtuple
Annotation = namedtuple('Annotation', ['text_type','start','end','text','ment... | ddbiolib-master | ddbiolib/datasets/ncbi_legacy.py |
ddbiolib-master | ddbiolib/datasets/chemdner.py | |
import psycopg2
import mysql.connector
class DatabaseI(object):
'''Simple database wrapper. This mostly mirrors psycopg2 / mysql.connector
functionality with some assurances built in for closing connections
upon object destruction.
TODO: check if this is actually required'''
def __init_... | ddbiolib-master | ddbiolib/utils/database.py |
from .base import *
from .database import * | ddbiolib-master | ddbiolib/utils/__init__.py |
import os
from urllib2 import urlopen, URLError, HTTPError
def download(url,outfname):
try:
data = urlopen(url)
with open(outfname, "wb") as f:
f.write(data.read())
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.r... | ddbiolib-master | ddbiolib/utils/base.py |
# from .base import *
from .base_snorkel import * | ddbiolib-master | ddbiolib/versioning/__init__.py |
'''
Snorkel Candidate Version
'''
import os
import sys
import glob
import hashlib
import cPickle
from datetime import datetime
def dict2str(d):
'''Convert dictionary to tuple pair string'''
return str(d).encode("utf-8",errors="ignore")
def checksum(s):
'''Create checksum for input object'''
if type(... | ddbiolib-master | ddbiolib/versioning/base_snorkel.py |
import os
import random
import hashlib
from datetime import datetime
from ddlite import *
def dict2str(d):
'''Convert dictionary to tuple pair string'''
return str(d).encode("utf-8",errors="ignore")
def checksum(s):
'''Create checksum for input object'''
if type(s) is dict:
s = dict2str(s)
... | ddbiolib-master | ddbiolib/versioning/base.py |
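Both versioning modules above share the dict2str/checksum pattern and are truncated at the hash step; a hedged completion, where md5 and the key sorting (added for order stability) are assumptions:

```python
# Hedged completion of the dict2str/checksum pattern; the original
# digest algorithm is cut off above, so md5 is an assumption, and
# sorting the items is a tweak for deterministic output.
import hashlib

def dict2str(d):
    '''Convert dictionary to tuple pair string'''
    return str(sorted(d.items()))

def checksum(s):
    '''Create checksum for input object'''
    if type(s) is dict:
        s = dict2str(s)
    return hashlib.md5(s.encode("utf-8")).hexdigest()

print(checksum({"b": 2, "a": 1}) == checksum({"a": 1, "b": 2}))  # True
```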
from .base import *
from .doc_parsers import *
from .utils import *
| ddbiolib-master | ddbiolib/corpora/__init__.py |
import re
# Originally from http://effbot.org/zone/unicode-gremlins.htm
# Replaced definitions to conform to:
# ftp://ftp.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT
# http://www.microsoft.com/typography/unicode/1252.htm
cp1252 = {
u"\x80": u"\u20AC", # EURO SIGN
u"\x81": u"", # ... | ddbiolib-master | ddbiolib/corpora/utils.py |