keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/nodeScaleBox.py | .py | 10,077 | 231 | import bpy
import numpy as np
from .nodesBoolmultiplex import axes_demultiplexer_node_group
#initialize scalebox node group
# Arguably should be refactored to a rescaled primitive cube -> subdivide modifier -> set pos to max -> merge by distance
# Might be simpler than separate mesh grids, and have less redundancy
def scalebox_node_group():
    """Create or reuse the '_scalebox' GeometryNodeTree.

    Builds a box of axis grids around the data volume, one grid line per
    'µm per tick', to act as a 3D scale indicator.

    Inputs: physical size in µm and in Blender meters, tick spacing (µm), and
    a multiplexed int ('Axis Selection', one decimal digit per face, see
    nodesBoolmultiplex) selecting which of the 6 box faces to draw.
    Output: the joined scale-box geometry.
    """
    # Reuse an already-built group so the tree is only constructed once.
    node_group = bpy.data.node_groups.get("_scalebox")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type='GeometryNodeTree', name="_scalebox")
    links = node_group.links
    interface = node_group.interface
    # -- get Input --
    interface.new_socket("Size (µm)", in_out="INPUT", socket_type='NodeSocketVector')
    interface.items_tree[-1].default_value = (7.0, 5.0, 4.0)
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 10000000.0
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Size (m)", in_out="INPUT", socket_type='NodeSocketVector')
    interface.items_tree[-1].default_value = (13.0, 10.0, 6.0)
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 10000000.0
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("µm per tick", in_out="INPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].default_value = 10
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 3.4028234663852886e+38
    interface.items_tree[-1].attribute_domain = 'POINT'
    # 1111111 = all seven booleans (frontface culling + 6 faces) enabled.
    interface.new_socket("Axis Selection", in_out="INPUT", socket_type='NodeSocketInt')
    interface.items_tree[-1].default_value = 1111111
    interface.items_tree[-1].min_value = 0
    interface.items_tree[-1].max_value = 1111111
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (-800, 0)
    # output Geometry
    interface.new_socket("Geometry", in_out="OUTPUT", socket_type='NodeSocketGeometry')
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (2900, 0)
    join_geo = node_group.nodes.new("GeometryNodeJoinGeometry")
    join_geo.location = (2100, 100)
    ## -- process IO --
    # Corner locations of the box in meters, centered in X/Y with Z from 0.
    loc_0 = node_group.nodes.new("ShaderNodeVectorMath")
    loc_0.operation = "MULTIPLY"
    loc_0.location = (-600, -100)
    loc_0.label = "location 0,0,0"
    links.new(group_input.outputs.get("Size (m)"), loc_0.inputs[0])
    loc_0.inputs[1].default_value = (-0.5, -0.5, 0)
    loc_max = node_group.nodes.new("ShaderNodeVectorMath")
    loc_max.operation = "MULTIPLY"
    loc_max.location = (-450, -100)
    loc_max.label = "location max val"
    links.new(group_input.outputs.get("Size (m)"), loc_max.inputs[0])
    loc_max.inputs[1].default_value = (0.5, 0.5, 1)
    # Per-axis conversion factor from µm to scene meters.
    m_per_µm = node_group.nodes.new("ShaderNodeVectorMath")
    m_per_µm.operation = "DIVIDE"
    m_per_µm.location = (-600, -300)
    m_per_µm.label = "m per µm"
    links.new(group_input.outputs.get("Size (m)"), m_per_µm.inputs[0])
    links.new(group_input.outputs.get("Size (µm)"), m_per_µm.inputs[1])
    # -- make scale box and read out/store normals
    demultiplex_axes = node_group.nodes.new('GeometryNodeGroup')
    demultiplex_axes.node_tree = axes_demultiplexer_node_group()
    demultiplex_axes.location = (-600, -600)
    links.new(group_input.outputs.get('Axis Selection'), demultiplex_axes.inputs[0])
    µm_ticks_float = node_group.nodes.new("ShaderNodeVectorMath")
    µm_ticks_float.operation = "DIVIDE"
    µm_ticks_float.location = (-600, 200)
    µm_ticks_float.label = "float-nr of ticks"
    links.new(group_input.outputs.get("Size (µm)"), µm_ticks_float.inputs[0])
    links.new(group_input.outputs.get("µm per tick"), µm_ticks_float.inputs[1])
    # Round the tick count up so the grid always covers the full volume.
    n_ticks_int = node_group.nodes.new("ShaderNodeVectorMath")
    n_ticks_int.operation = "CEIL"
    n_ticks_int.location = (-450, 200)
    n_ticks_int.label = "nr of ticks"
    links.new(µm_ticks_float.outputs[0], n_ticks_int.inputs[0])
    ticks_offset = node_group.nodes.new("ShaderNodeVectorMath")
    ticks_offset.operation = "ADD"
    ticks_offset.location = (-300, 200)
    ticks_offset.label = "add 0th tick"
    links.new(n_ticks_int.outputs[0], ticks_offset.inputs[0])
    ticks_offset.inputs[1].default_value = (1, 1, 1)
    # Size of the (possibly overshooting) full grid in µm, then in meters.
    µm_overshoot = node_group.nodes.new("ShaderNodeVectorMath")
    µm_overshoot.operation = "MULTIPLY"
    µm_overshoot.location = (-450, 000)
    µm_overshoot.label = "µm full grid"
    links.new(n_ticks_int.outputs[0], µm_overshoot.inputs[0])
    links.new(group_input.outputs.get("µm per tick"), µm_overshoot.inputs[1])
    size_overshoot = node_group.nodes.new("ShaderNodeVectorMath")
    size_overshoot.operation = "MULTIPLY"
    size_overshoot.location = (-300, 0)
    size_overshoot.label = "overshoot size vec"
    links.new(µm_overshoot.outputs[0], size_overshoot.inputs[0])
    links.new(m_per_µm.outputs[0], size_overshoot.inputs[1])
    size_overshootXYZ = node_group.nodes.new("ShaderNodeSeparateXYZ")
    size_overshootXYZ.location = (-150, 0)
    size_overshootXYZ.label = "grid size"
    links.new(size_overshoot.outputs[0], size_overshootXYZ.inputs[0])
    n_ticksXYZ = node_group.nodes.new("ShaderNodeSeparateXYZ")
    n_ticksXYZ.location = (-150, 200)
    n_ticksXYZ.label = "n ticks"
    links.new(ticks_offset.outputs[0], n_ticksXYZ.inputs[0])
    # make principal box: one grid per face (2 sides x 3 axis planes)
    finals = []
    for sideix, side in enumerate(['bottom', 'top']):
        for axix, ax in enumerate(['xy', 'yz', 'zx']):
            grid = node_group.nodes.new("GeometryNodeMeshGrid")
            grid.label = ax + "_" + side
            grid.name = ax + "_" + side + "_grid"
            for which, axis in enumerate("xyz"):
                if axis in ax:
                    # Grid node inputs: [0]/[1] = Size X/Y, [2]/[3] = Vertices X/Y;
                    # ax.find(axis) maps the world axis onto the grid's local X/Y.
                    links.new(size_overshootXYZ.outputs[which], grid.inputs[ax.find(axis)])
                    links.new(n_ticksXYZ.outputs[which], grid.inputs[ax.find(axis) + 2])
            transform = node_group.nodes.new("GeometryNodeTransform")
            links.new(grid.outputs[0], transform.inputs[0])
            if side == "top":
                pretransform = node_group.nodes.new("ShaderNodeVectorMath")
                pretransform.operation = 'MULTIPLY'
                links.new(group_input.outputs.get("Size (m)"), pretransform.inputs[0])
                links.new(pretransform.outputs[0], transform.inputs.get("Translation"))
                # shift tops to the correct plane (out-of-axis) and calc rotation
                shift = np.array([float(axis not in ax) for axis in "xyz"])
                pretransform.inputs[1].default_value = tuple(shift)
            else:
                # Bottom faces are flipped so their normals point outward.
                pretransform = node_group.nodes.new("GeometryNodeFlipFaces")
                links.new(grid.outputs[0], pretransform.inputs[0])
                links.new(pretransform.outputs[0], transform.inputs[0])
            # Rotation (in half-turns, multiplied by pi below) to bring the
            # XY-plane grid into the yz / zx planes.
            rot = [0, 0, 0]
            if ax == "yz":
                rot = [0.5, 0, 0.5]
            elif ax == 'zx':
                rot = [0, -0.5, -0.5]
            transform.inputs.get("Rotation").default_value = tuple(np.array(rot) * np.pi)
            # translocate 0,0 to be accurate for each
            bbox = node_group.nodes.new("GeometryNodeBoundBox")
            links.new(transform.outputs[0], bbox.inputs[0])
            find_0 = node_group.nodes.new("ShaderNodeVectorMath")
            find_0.operation = "SUBTRACT"
            links.new(loc_0.outputs[0], find_0.inputs[0])
            links.new(bbox.outputs[1], find_0.inputs[1])
            if side == "top":
                # Top faces only correct the in-plane axes (mask keeps axes in ax).
                top = node_group.nodes.new("ShaderNodeVectorMath")
                top.operation = "MULTIPLY"
                links.new(bbox.outputs[1], top.inputs[0])
                links.new(top.outputs[0], find_0.inputs[1])
                top.inputs[1].default_value = tuple(np.array([float(axis in ax) for axis in "xyz"]))
            else:
                top = None
            set_pos = node_group.nodes.new("GeometryNodeSetPosition")
            links.new(transform.outputs[0], set_pos.inputs[0])
            links.new(find_0.outputs[0], set_pos.inputs[-1])
            # Delete the face when its demultiplexed selection bit is off.
            notnode = node_group.nodes.new("FunctionNodeBooleanMath")
            notnode.operation = "NOT"
            links.new(demultiplex_axes.outputs[sideix * 3 + axix + 1], notnode.inputs[0])
            del_ax = node_group.nodes.new("GeometryNodeDeleteGeometry")
            links.new(set_pos.outputs[0], del_ax.inputs[0])
            links.new(notnode.outputs[0], del_ax.inputs[1])
            # Lay the per-face chain out on its own row in the node editor.
            for locix, node in enumerate([grid, pretransform, transform, bbox, top,
                                          find_0, set_pos, notnode, del_ax]):
                if node:
                    nodeix = (sideix * 3) + axix
                    node.location = (200 * locix + 200, 300 * 2.5 + nodeix * -300)
            finals.append(del_ax)
    for final in reversed(finals):
        links.new(final.outputs[0], join_geo.inputs[0])
    # set final nodes to right position: clamp grid overshoot back to the box max
    pos = node_group.nodes.new("GeometryNodeInputPosition")
    pos.location = (2100, -100)
    min_axis = node_group.nodes.new("ShaderNodeVectorMath")
    min_axis.operation = "MINIMUM"
    min_axis.location = (2250, -100)
    links.new(pos.outputs[0], min_axis.inputs[0])
    links.new(loc_max.outputs[0], min_axis.inputs[1])
    clip_axis = node_group.nodes.new("GeometryNodeSetPosition")
    clip_axis.location = (2500, 0)
    clip_axis.label = "clip final axis"
    links.new(join_geo.outputs[0], clip_axis.inputs[0])
    links.new(min_axis.outputs[0], clip_axis.inputs[2])
    # Clamping collapses overshot vertices onto the box edge; merge the duplicates.
    merge = node_group.nodes.new("GeometryNodeMergeByDistance")
    links.new(clip_axis.outputs[0], merge.inputs[0])
    merge.location = (2700, 0)
    links.new(merge.outputs[0], group_output.inputs[0])
    return node_group
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/nodesBoolmultiplex.py | .py | 4,415 | 103 | import bpy
def axes_multiplexer_node_group():
    """Create or reuse the 'multiplex_axes' GeometryNodeTree.

    Packs 7 boolean inputs (frontface culling + 6 box faces) into a single
    int, one decimal digit per boolean, so one socket can carry the whole
    selection (inverse of axes_demultiplexer_node_group).
    """
    node_group = bpy.data.node_groups.get("multiplex_axes")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type='GeometryNodeTree', name="multiplex_axes")
    links = node_group.links
    interface = node_group.interface
    interface.new_socket("frontface culling (clip axes in front of data)", in_out="INPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = True
    interface.items_tree[-1].attribute_domain = 'POINT'
    for sideix, side in enumerate(['bottom', 'top']):
        for axix, ax in enumerate(['xy', 'yz', 'zx']):
            interface.new_socket(ax + " " + side, in_out="INPUT", socket_type='NodeSocketBool')
            interface.items_tree[-1].default_value = True
            interface.items_tree[-1].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (-800, 0)
    base = node_group.nodes.new("FunctionNodeInputInt")
    base.integer = 0
    base.location = (-800, 300)
    # Digit-weight accumulator: starts at 0.1 so the first multiply-by-10
    # gives weight 1, the next 10, etc.
    trav = node_group.nodes.new("ShaderNodeValue")
    trav.outputs[0].default_value = 0.1
    trav.location = (-800, 200)
    lastout = (base.outputs[0], trav.outputs[0])
    # outputs[:-1] skips the trailing virtual (extension) socket of the group input.
    for ix, inputbool in enumerate(group_input.outputs[:-1]):
        mult = node_group.nodes.new("ShaderNodeMath")
        mult.operation = "MULTIPLY"
        mult.inputs[1].default_value = 10
        links.new(lastout[1], mult.inputs[0])
        # Add this digit's weight to the running total only if the bool is set.
        add = node_group.nodes.new("ShaderNodeMath")
        add.operation = "ADD"
        links.new(lastout[0], add.inputs[0])
        links.new(mult.outputs[0], add.inputs[1])
        switch = node_group.nodes.new("GeometryNodeSwitch")
        switch.input_type = "INT"
        links.new(inputbool, switch.inputs[0])
        links.new(lastout[0], switch.inputs.get('False'))
        links.new(add.outputs[0], switch.inputs.get('True'))
        lastout = (switch.outputs[0], mult.outputs[0])
        for colix, node in enumerate([mult, add, switch]):
            node.location = (-600 + colix * 200 + ix * 200, ix * -200 + 500)
    interface.new_socket("multi-selection", in_out="OUTPUT", socket_type='NodeSocketInt')
    # NOTE(review): indexes items_tree[0], unlike the items_tree[-1] pattern
    # used everywhere else — confirm this targets the newly added output socket.
    interface.items_tree[0].attribute_domain = 'POINT'
    group_output = node_group.nodes.new("NodeGroupOutput")
    # Placement reuses colix/ix from the last loop iteration.
    group_output.location = (-300 + colix * 200 + ix * 200, ix * -200 + 500)
    links.new(lastout[0], group_output.inputs[0])
    return node_group
def axes_demultiplexer_node_group():
    """Create or reuse the 'demultiplex_axes' GeometryNodeTree.

    Inverse of multiplex_axes: splits the packed decimal-digit int back into
    7 boolean outputs (frontface culling + 6 box faces).
    """
    node_group = bpy.data.node_groups.get("demultiplex_axes")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type='GeometryNodeTree', name="demultiplex_axes")
    links = node_group.links
    interface = node_group.interface
    interface.new_socket("multi-selection", in_out="INPUT", socket_type='NodeSocketInt')
    interface.items_tree[0].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (-800, 0)
    interface.new_socket("frontface culling (clip axes in front of data)", in_out="OUTPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].attribute_domain = 'POINT'
    for sideix, side in enumerate(['bottom', 'top']):
        for axix, ax in enumerate(['xy', 'yz', 'zx']):
            interface.new_socket(ax + " " + side, in_out="OUTPUT", socket_type='NodeSocketBool')
            interface.items_tree[-1].attribute_domain = 'POINT'
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (1200, 0)
    # inputs[:-1] skips the trailing virtual (extension) socket of the group output.
    for ix, outputbool in enumerate(group_output.inputs[:-1]):
        # Extract decimal digit `ix`: trunc(value / 10**ix) % 10.
        div = node_group.nodes.new("ShaderNodeMath")
        div.operation = "DIVIDE"
        links.new(group_input.outputs[0], div.inputs[0])
        div.inputs[1].default_value = 10 ** ix
        trunc = node_group.nodes.new("ShaderNodeMath")
        trunc.operation = "TRUNC"
        links.new(div.outputs[0], trunc.inputs[0])
        mod = node_group.nodes.new("ShaderNodeMath")
        mod.operation = "MODULO"
        mod.inputs[1].default_value = 10
        links.new(trunc.outputs[0], mod.inputs[0])
        links.new(mod.outputs[0], outputbool)
        for colix, node in enumerate([div, trunc, mod]):
            node.location = (-600 + colix * 200 + ix * 200, ix * 200 - 500)
    return node_group
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/nodeRemapObjectID.py | .py | 2,706 | 63 | import bpy
def remap_oid_node():
    """Create or reuse the 'Labelmask Remap Switch' ShaderNodeTree.

    Maps a label-mask object id ('Value') to a 0-1 color-ramp factor, either
    revolving through '# Colors' (modulo) or spreading linearly over
    '# Objects', selected by the 'Revolving Colormap' boolean.
    """
    node_group = bpy.data.node_groups.get("Labelmask Remap Switch")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type='ShaderNodeTree', name="Labelmask Remap Switch")
    links = node_group.links
    interface = node_group.interface
    interface.new_socket("Value", in_out="INPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Revolving Colormap", in_out="INPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = True
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("# Colors", in_out="INPUT", socket_type='NodeSocketInt')
    interface.items_tree[-1].default_value = 10
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.items_tree[-1].min_value = 0
    interface.items_tree[-1].max_value = 32
    interface.new_socket("# Objects", in_out="INPUT", socket_type='NodeSocketInt')
    interface.items_tree[-1].default_value = 100
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.items_tree[-1].min_value = 0
    interface.new_socket("Fac", in_out="OUTPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (0, 0)
    # Revolving branch: (Value mod # Colors) mapped over the range 0..(# Colors + 1).
    mod = node_group.nodes.new("ShaderNodeMath")
    mod.location = (200, 0)
    mod.operation = "MODULO"
    links.new(group_input.outputs.get("Value"), mod.inputs[0])
    links.new(group_input.outputs.get("# Colors"), mod.inputs[1])
    add = node_group.nodes.new("ShaderNodeMath")
    add.location = (200, -200)
    add.operation = "ADD"
    links.new(group_input.outputs.get("# Colors"), add.inputs[0])
    add.inputs[1].default_value = 1
    map_range = node_group.nodes.new("ShaderNodeMapRange")
    map_range.location = (400, 0)
    links.new(mod.outputs[0], map_range.inputs[0])
    links.new(add.outputs[0], map_range.inputs[2])
    # Linear branch: Value mapped over the range 0..(# Objects).
    map_range2 = node_group.nodes.new("ShaderNodeMapRange")
    map_range2.location = (400, -300)
    links.new(group_input.outputs.get('Value'), map_range2.inputs[0])
    links.new(group_input.outputs.get('# Objects'), map_range2.inputs[2])
    # Mix selects between the two branches by the boolean factor.
    mix = node_group.nodes.new("ShaderNodeMix")
    mix.location = (600, -100)
    links.new(group_input.outputs.get('Revolving Colormap'), mix.inputs[0])
    links.new(map_range.outputs[0], mix.inputs[3])
    links.new(map_range2.outputs[0], mix.inputs[2])
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (800, -100)
    links.new(mix.outputs[0], group_output.inputs[0])
    return node_group
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/cmap_menus.py | .py | 3,149 | 76 | import bpy
import numpy as np
import cmap
# Colormap category -> Blender icon name shown in the cmap menus.
CMAP_CATEGORIES = {
    "sequential": "IPO_LINEAR",
    "diverging": "LINCURVE",
    "cyclic": "MESH_CIRCLE",
    "qualitative": "OUTLINER_DATA_POINTCLOUD",
    "miscellaneous": "ADD",
}
def cmap_submenu_class(op, opname, category, namespace=None):
    """Dynamically build a bpy.types.Menu subclass for one cmap category.

    With namespace=None the menu lists the category's namespace submenus;
    with a namespace it lists that namespace's colormaps, each invoking
    operator *op* with its cmap_name set.
    """
    def draw(self, context):
        if self.namespace is None:
            for namespace in sorted(cmap_namespaces(self.category)):
                # self.category and category are the same value here; both spellings kept.
                self.layout.menu(cmap_bl(self.category, namespace, opname=opname)[0], text=cmap_bl(category, namespace, opname=opname)[1])
        else:
            for cmap in cmaps(self.category, self.namespace):
                if cmap != 'prinsenvlag':  # exclude this colormap, this is a weird fascist dogwhistle
                    op_ = self.layout.operator(op, text=cmap)
                    op_.cmap_name = cmap
        # menu_items.get_submenu("utils").menu(self.layout, context)
    cls_elements = {
        'bl_idname': cmap_bl(category, namespace, opname=opname)[0],
        'bl_label': cmap_bl(category, namespace, opname=opname)[1],
        'category': category,
        'namespace': namespace,
        'draw': draw
    }
    # Dynamically create classes for cmap submenus
    menu_class = type(
        cmap_bl(category, namespace, opname=opname)[0],
        (bpy.types.Menu,),
        cls_elements
    )
    return menu_class
def cmap_namespaces(categories):
    """Return the distinct namespace prefixes (text before ':') of all cmap keys in *categories*."""
    keys = cmap.Catalog().unique_keys(categories=categories, prefer_short_names=False)
    return list({key.split(':')[0] for key in keys})
def cmaps(category, namespace):
    """Return the distinct colormap names (text after ':') within *namespace* for *category*."""
    found = set()
    for key in cmap.Catalog().unique_keys(categories=[category], prefer_short_names=False):
        parts = key.split(':')
        if parts[0] == namespace:
            found.add(parts[1])
    return list(found)
def cmap_bl(category, namespace=None, name=None, opname=None):
    """Return (bl_idname, label) for a cmap menu entry.

    The idname grows with the depth given: category-only, category+namespace,
    or category+namespace+name; the label is the deepest component supplied.
    """
    prefix = f"MIN_MT_{category.upper()}"
    if name is not None:
        return f"{prefix}_{namespace.upper()}_{name.upper()}_{opname.upper()}", name
    if namespace is not None:
        return f"{prefix}_{namespace.upper()}_{opname.upper()}", namespace
    return f"{prefix}_{opname.upper()}", category
def cmap_catalog():
    """Yield (category, namespace, name) for every colormap in the known categories."""
    for category in CMAP_CATEGORIES:
        keys = cmap.Catalog().unique_keys(categories=[category], prefer_short_names=False)
        for key in keys:
            yield category, key.split(':')[0], key.split(':')[1]
def draw_category_menus(self, context, op, opname):
    """Draw one submenu per cmap category plus a 'Single Color' operator entry."""
    for category in CMAP_CATEGORIES:
        menu_id, menu_label = cmap_bl(category, opname=opname)
        self.layout.menu(menu_id, text=menu_label.capitalize(), icon=CMAP_CATEGORIES[category])
    single = self.layout.operator(op, text="Single Color", icon="MESH_PLANE")
    single.cmap_name = 'single_color'
# Build the dynamic menu classes for both LUT operators (add / replace):
# one category-level menu plus one namespace-level menu per namespace.
CLASSES = []
for op, opname in [('microscopynodes.add_lut', "ADD"), ('microscopynodes.replace_lut', 'REPLACE')]:
    CLASSES = CLASSES + [cmap_submenu_class(op, opname, category) for category in CMAP_CATEGORIES]
    for category in CMAP_CATEGORIES:
        # NOTE(review): passes a bare string as `categories=` here, while other
        # call sites wrap it in a list — confirm the cmap API accepts both.
        CLASSES.extend([cmap_submenu_class(op, opname, category, namespace) for namespace in cmap_namespaces(categories=category)])
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/__init__.py | .py | 1,397 | 37 | from . import cmap_menus
from .nodeVolumeAlpha import volume_alpha_node
from .handle_cmap import set_color_ramp_from_ch, get_lut
from .nodeRemapObjectID import remap_oid_node
from . import ops
import bpy
class MIN_MT_CMAP_ADD(bpy.types.Menu):
    """Top-level 'Add LUT' menu listing the cmap category submenus."""
    bl_idname = "MIN_MT_CMAP_ADD"
    bl_label = "Add LUT"

    def draw(self, context):
        cmap_menus.draw_category_menus(self, context, "microscopynodes.add_lut", "ADD")
class MIN_MT_CMAP_REPLACE(bpy.types.Menu):
    """Top-level 'Replace LUT' menu listing the cmap category submenus."""
    bl_idname = "MIN_MT_CMAP_REPLACE"
    bl_label = "Replace LUT"

    def draw(self, context):
        cmap_menus.draw_category_menus(self, context, "microscopynodes.replace_lut", "REPLACE")
def MIN_add_shader_node_menu(self, context):
    """Append the LUTs submenu to the shader editor's Add menu."""
    if context.area.ui_type != 'ShaderNodeTree':
        return
    self.layout.menu("MIN_MT_CMAP_ADD", text="LUTs", icon="COLOR")
def MIN_context_shader_node_menu(self, context):
    """Add LUT replace/reverse entries to the context menu when exactly one color ramp is selected."""
    if context.area.ui_type != 'ShaderNodeTree' or bpy.context.area.type != "NODE_EDITOR":
        return
    selected = bpy.context.selected_nodes
    if len(selected) != 1 or selected[0].type != 'VALTORGB':
        return
    layout = self.layout
    layout.menu("MIN_MT_CMAP_REPLACE", text="Replace LUT", icon="COLOR")
    layout.operator("microscopynodes.reverse_lut", text="Reverse LUT", icon="ARROW_LEFTRIGHT")
CLASSES = [MIN_MT_CMAP_ADD, MIN_MT_CMAP_REPLACE] + cmap_menus.CLASSES + ops.CLASSES | Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/ops.py | .py | 4,179 | 113 | import bpy
from bpy.types import Context, Operator
from .handle_cmap import get_lut, set_color_ramp
from bpy.props import (StringProperty, FloatProperty,
PointerProperty, IntProperty,
BoolProperty, EnumProperty
)
# from .nodeCmap import cmap_node
class MIN_OT_Replace_LUT_Node_Group(Operator):
    """Replace LUT of color ramp"""
    bl_idname = "microscopynodes.replace_lut"
    bl_label = "Replace Lookup Table of Color Ramp node"
    bl_options = {"REGISTER", "UNDO"}

    # Name of the colormap to apply; set by the menu item that invokes this operator.
    cmap_name: StringProperty(  # type: ignore
        name="cmap", description="", default="", subtype="NONE", maxlen=0
    )
    description: StringProperty(name="Description")

    def execute(self, context):
        try:
            # (1,1,1) is a placeholder single_color; only used for the 'single_color' cmap.
            lut, linear = get_lut(self.cmap_name, (1, 1, 1))
            # Applies to the one selected node, expected to be a color ramp.
            set_color_ramp(context.selected_nodes[0], lut, linear, self.cmap_name)
        except RuntimeError:
            self.report(
                {"ERROR"},
                message="Failed to replace lut. ",
            )
            return {"CANCELLED"}
        return {"FINISHED"}
class MIN_OT_Add_LUT_Node_Group(Operator):
    """Add color ramp with LUT"""
    bl_idname = "microscopynodes.add_lut"
    bl_label = "Add Color Ramp with LUT Node"
    bl_options = {"REGISTER", "UNDO"}

    # Name of the colormap to apply; set by the menu item that invokes this operator.
    cmap_name: StringProperty(  # type: ignore
        name="cmap", description="", default="", subtype="NONE", maxlen=0
    )
    description: StringProperty(name="Description")

    @classmethod
    def description(cls, context, properties):
        # Dynamic tooltip. Fixed: this operator has no `node_description`
        # property — reading it raised AttributeError; the text lives in the
        # `description` StringProperty above.
        return properties.description

    def execute(self, context):
        try:
            # nodes.append(self.node_name, link=self.node_link)
            _add_cmap(self.cmap_name, context)  # , label=self.node_label)
        except RuntimeError:
            self.report(
                {"ERROR"},
                message="Failed to add node. Ensure you are not in edit mode.",
            )
            return {"CANCELLED"}
        return {"FINISHED"}
class MIN_OT_Reverse_LUT_Node_Group(Operator):
    """Reverse positions of LUT (duplicate of the arrow menu next to the ramp)"""
    bl_idname = "microscopynodes.reverse_lut"
    bl_label = "Reverse order of color ramp positions"
    bl_options = {"REGISTER", "UNDO"}

    cmap_name: StringProperty(  # type: ignore
        name="cmap", description="", default="", subtype="NONE", maxlen=0
    )

    def execute(self, context):
        try:
            color_ramp = context.selected_nodes[0].color_ramp
            # Snapshot all stops before mutating the ramp.
            elements = [(float(el.position), list(el.color)) for el in color_ramp.elements]
            # A ramp must always keep at least one element; remove all but one.
            # (Removed unused `left, right` locals from the original.)
            for stop in range(len(color_ramp.elements) - 1):
                color_ramp.elements.remove(color_ramp.elements[0])
            for ix, el in enumerate(elements):
                if len(color_ramp.elements) <= ix:
                    color_ramp.elements.new(0.01)
                # NOTE: relies on Blender re-sorting elements by position, so
                # the lowest-position stop is always found at index 0.
                color_ramp.elements[0].color = el[1]
                color_ramp.elements[0].position = abs(1 - el[0])
        except RuntimeError:
            self.report(
                {"ERROR"},
                message="Failed to reverse LUT, use the menu on the node itself!",
            )
            return {"CANCELLED"}
        return {"FINISHED"}
def _add_cmap(cmap_name, context, show_options=False, material="default"):
    """
    Add a node group to the node tree and set the values.
    intended to be called upon button press in the node tree, and not for use in general scripting
    """
    # actually invoke the operator to add a node to the current node tree
    # use_transform=True ensures it appears where the user's mouse is and is currently
    # being moved so the user can place it where they wish
    bpy.ops.node.add_node(
        "INVOKE_DEFAULT", type="ShaderNodeValToRGB", use_transform=True
    )
    node = context.active_node
    # Hide the Alpha output socket; only the color output is used for LUTs.
    node.outputs[1].hide = True
    # (1,1,1) is a placeholder single_color; only used for the 'single_color' cmap.
    lut, linear = get_lut(cmap_name, (1, 1, 1))
    set_color_ramp(node, lut, linear, cmap_name)
    node.width = 300
CLASSES = [MIN_OT_Add_LUT_Node_Group, MIN_OT_Replace_LUT_Node_Group, MIN_OT_Reverse_LUT_Node_Group] | Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/handle_cmap.py | .py | 1,376 | 37 | import bpy
import cmap
def set_color_ramp_from_ch(ch, ramp_node):
    """Apply the colormap configured on channel dict *ch* to *ramp_node*."""
    cmap_name = ch['cmap']
    lut, linear = get_lut(cmap_name, ch['single_color'])
    set_color_ramp(ramp_node, lut, linear, cmap_name)
    return
def set_color_ramp(ramp_node, lut, linear, name):
    """Write the *lut* colors into a ColorRamp node.

    linear acts both as a flag (LINEAR vs CONSTANT interpolation) and as an
    int (0/1) in the position arithmetic: divisor len(lut)-1 puts endpoints at
    0 and 1 for linear ramps, len(lut) gives one band per color for constant.
    """
    from ...ui.preferences import addon_preferences
    # Global user preference: flip every LUT (e.g. for dark/light themes).
    if addon_preferences(bpy.context).invert_color:
        lut = list(reversed(lut))
    # Strip surplus stops; the loop leaves two, which are overwritten below
    # (a ramp must always keep at least one element).
    for stop in range(len(ramp_node.color_ramp.elements) - 2):
        ramp_node.color_ramp.elements.remove(ramp_node.color_ramp.elements[0])
    for ix, color in enumerate(lut):
        if len(ramp_node.color_ramp.elements) <= ix:
            ramp_node.color_ramp.elements.new(ix / (len(lut) - linear))
        ramp_node.color_ramp.elements[ix].position = ix / (len(lut) - linear)
        ramp_node.color_ramp.elements[ix].color = (color[0], color[1], color[2], color[3])
    if not linear:
        ramp_node.color_ramp.interpolation = "CONSTANT"
    else:
        ramp_node.color_ramp.interpolation = "LINEAR"
    ramp_node.label = name.capitalize()
    return
def get_lut(name, single_color):
    """Return (lut, linear) for colormap *name*.

    'single_color' (case-insensitive) yields a two-stop black-to-color ramp;
    anything else is resolved through the cmap package, capped at 32 stops.
    """
    key = name.lower()
    if key == "single_color":
        return [[0, 0, 0, 1], [*single_color, 1]], True
    n_stops = min(len(cmap.Colormap(key).lut()), 32)
    lut = cmap.Colormap(key).lut(n_stops)
    linear = cmap.Colormap(key).interpolation == 'linear'
    return lut, linear
| Python |
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/nodeVolumeAlpha.py | .py | 3,266 | 71 | import bpy
from .nodeIgnoreExtremes import ignore_extremes_node_group
import cmap
def volume_alpha_node():
    """Create or reuse the 'Volume Transparency' ShaderNodeTree.

    Computes Alpha = (Value * Alpha Multiplier + Alpha Baseline), gated to 0
    at the value extremes when 'Clip Min' / 'Clip Max' are enabled (via the
    Ignore Extremes subgroup).
    """
    node_group = bpy.data.node_groups.get("Volume Transparency")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type='ShaderNodeTree', name="Volume Transparency")
    links = node_group.links
    interface = node_group.interface
    interface.new_socket("Value", in_out="INPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Clip Min", in_out="INPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = True
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Clip Max", in_out="INPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = False
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Alpha Baseline", in_out="INPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].default_value = 0.2
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 100.0
    interface.new_socket("Alpha Multiplier", in_out="INPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].default_value = 0.0
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.items_tree[-1].min_value = 0.0
    interface.items_tree[-1].max_value = 100.0
    interface.new_socket("Alpha", in_out="OUTPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (0, 0)
    # -- ALPHA extremes/mult --
    # 0/1 gate that zeroes the alpha at clipped extremes.
    ignore_extremes = node_group.nodes.new('ShaderNodeGroup')
    ignore_extremes.node_tree = ignore_extremes_node_group()
    ignore_extremes.location = (200, -200)
    ignore_extremes.show_options = False
    links.new(group_input.outputs.get('Value'), ignore_extremes.inputs.get('Value'))
    links.new(group_input.outputs.get('Clip Min'), ignore_extremes.inputs.get('Ignore 0'))
    links.new(group_input.outputs.get('Clip Max'), ignore_extremes.inputs.get('Ignore 1'))
    # Value * Multiplier + Baseline in a single node.
    mult_add = node_group.nodes.new("ShaderNodeMath")
    mult_add.location = (200, 0)
    mult_add.operation = "MULTIPLY_ADD"
    links.new(group_input.outputs.get("Value"), mult_add.inputs[0])
    links.new(group_input.outputs.get("Alpha Multiplier"), mult_add.inputs[1])
    links.new(group_input.outputs.get("Alpha Baseline"), mult_add.inputs[2])
    mult = node_group.nodes.new("ShaderNodeMath")
    mult.location = (400, -100)
    mult.operation = "MULTIPLY"
    links.new(mult_add.outputs[0], mult.inputs[0])
    links.new(ignore_extremes.outputs[0], mult.inputs[1])
    # mult3 = node_group.nodes.new("ShaderNodeMath")
    # mult3.location = (600, -150)
    # mult3.operation = "MULTIPLY"
    # links.new(mult.outputs[0], mult3.inputs[0])
    # links.new(group_input.outputs.get("Alpha Multiplier"), mult3.inputs[1])
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (600, -100)
    links.new(mult.outputs[0], group_output.inputs[0])
    # links.new(mult3.outputs[0], group_output.inputs[1])
    return node_group
3D | aafkegros/MicroscopyNodes | microscopynodes/min_nodes/shader_nodes/nodeIgnoreExtremes.py | .py | 2,504 | 69 | import bpy
def ignore_extremes_node_group():
    """Create or reuse the 'Ignore Extremes' ShaderNodeTree.

    Outputs 1.0 only when Value passes both bound checks: Value > 0 when
    'Ignore 0' is set, Value < 1 when 'Ignore 1' is set; a disabled check
    always passes.
    """
    node_group = bpy.data.node_groups.get("Ignore Extremes")
    if node_group:
        return node_group
    node_group = bpy.data.node_groups.new(type='ShaderNodeTree', name="Ignore Extremes")
    links = node_group.links
    interface = node_group.interface
    interface.new_socket("Value", in_out="INPUT", socket_type='NodeSocketFloat')
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Ignore 0", in_out="INPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = True
    interface.items_tree[-1].attribute_domain = 'POINT'
    interface.new_socket("Ignore 1", in_out="INPUT", socket_type='NodeSocketBool')
    interface.items_tree[-1].default_value = False
    interface.items_tree[-1].attribute_domain = 'POINT'
    group_input = node_group.nodes.new("NodeGroupInput")
    group_input.location = (0, 0)
    # ignore 0
    # Threshold = Ignore0 - 1: 0 when ignoring (so Value > 0 is required),
    # -1 when not (check always passes for non-negative values).
    sub = node_group.nodes.new("ShaderNodeMath")
    sub.inputs[1].default_value = 1.0
    sub.location = (400, 100)
    sub.operation = "SUBTRACT"
    links.new(group_input.outputs[1], sub.inputs[0])
    great = node_group.nodes.new("ShaderNodeMath")
    great.location = (600, 100)
    great.operation = "GREATER_THAN"
    links.new(group_input.outputs[0], great.inputs[0])
    links.new(sub.outputs[0], great.inputs[1])
    # ignore 1
    # Second input is left at the Math node's default (0.5), so this is
    # (Ignore1 < 0.5): 1 when NOT ignoring the max, 0 when ignoring it.
    less = node_group.nodes.new("ShaderNodeMath")
    less.location = (200, -100)
    less.operation = "LESS_THAN"
    links.new(group_input.outputs[2], less.inputs[0])
    add = node_group.nodes.new("ShaderNodeMath")
    add.inputs[1].default_value = 1.0
    add.location = (400, -100)
    add.operation = "ADD"
    links.new(less.outputs[0], add.inputs[0])
    # Value < 1 when ignoring the max; Value < 2 (effectively always) otherwise.
    less2 = node_group.nodes.new("ShaderNodeMath")
    less2.location = (600, -100)
    less2.operation = "LESS_THAN"
    links.new(group_input.outputs[0], less2.inputs[0])
    links.new(add.outputs[0], less2.inputs[1])
    # combine both checks with AND (multiply of 0/1 values)
    mult = node_group.nodes.new("ShaderNodeMath")
    mult.location = (800, 0)
    mult.operation = "MULTIPLY"
    links.new(great.outputs[0], mult.inputs[0])
    links.new(less2.outputs[0], mult.inputs[1])
    interface.new_socket("Value", in_out="OUTPUT", socket_type='NodeSocketFloat')
    interface.items_tree[0].attribute_domain = 'POINT'
    group_output = node_group.nodes.new("NodeGroupOutput")
    group_output.location = (1000, 0)
    links.new(mult.outputs[0], group_output.inputs[0])
    return node_group
| Python |
3D | aafkegros/MicroscopyNodes | docs/faq.md | .md | 440 | 8 | # Help and Contact
The main venue for **Usage questions** is the
{: style="height:15px"} [image.sc forum](https://forum.image.sc/tag/microscopy-nodes) and you can also search here for previous questions.
If you've found a **bug** (or suspect something even a little bit of being non-intended behaviour), don't be afraid to open an [issue](https://github.com/aafkegros/MicroscopyNodes/issues)!
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/outdated.md | .md | 1,045 | 24 | # Installing and using Microscopy Nodes with Blender < 4.2
## Install
- Download an appropriate microscopynodes/tif2blender zip file from the [releases page](https://github.com/oanegros/microscopynodes/releases). Please note the Blender version number.
Start blender.
Install the `microscopynodes` Add-On:
- In Blender go to `Edit > Preferences`
- Go to `Add-Ons` tab in `Preferences`
- Press `Install` and select the downloaded add-on `.zip` file
- In the added `microscopynodes` add-on window in `Preferences`: press the tick box to enable, and the arrow to unfold the details
- in the details press `install tifffile`
- (if this fails please try restarting blender and seeing if it can then find `tifffile`)
## Updating `microscopynodes`
To update the `microscopynodes` add-on (future versions may have bugfixes, new features) a few steps need to be taken:
- In Blender go to `Edit > Preferences`
- Go to `Add-Ons` tab in `Preferences` and find the `microscopynodes` add-on
- Press `Remove`
- Restart Blender
- Install the new version. | Markdown |
3D | aafkegros/MicroscopyNodes | docs/index.md | .md | 165 | 4 | <meta http-equiv="refresh" content="0; url=./tutorials/1_start/" />
If you are not redirected, [click here to start with the first tutorial](tutorials/1_start.md).
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/overview.md | .md | 2,021 | 39 | # Microscopy in Blender
`Microscopy Nodes` is a Blender add-on that incorporates bioimage support for the open-source software blender. {{ svg('microscopy_nodes') }} Microscopy Nodes simplifies loading bioimage (tif/zarr) files as volumetric objects in Blender.
Please make some pretty figures with this add-on!
For usage questions please use the [image.sc forum](https://forum.image.sc/tag/microscopy-nodes) 😁
For issues/bug reports/feature requests please [open an issue](https://github.com/aafkegros/MicroscopyNodes/issues).
If you publish with this add-on, please cite [the preprint](https://www.biorxiv.org/content/10.1101/2025.01.09.632153v1):
```
@article {Gros2025.01.09.632153,
author = {Gros, Oane and Bhickta, Chandni and Lokaj, Granita and Schwab, Yannick and K{\"o}hler, Simone and Banterle, Niccol{\`o}},
title = {Microscopy Nodes: versatile 3D microscopy visualization with Blender},
elocation-id = {2025.01.09.632153},
year = {2025},
doi = {10.1101/2025.01.09.632153},
publisher = {Cold Spring Harbor Laboratory},
URL = {https://www.biorxiv.org/content/early/2025/01/14/2025.01.09.632153},
eprint = {https://www.biorxiv.org/content/early/2025/01/14/2025.01.09.632153.full.pdf},
journal = {bioRxiv}
}
```
## Current Features
Microscopy Nodes supports:
- up to 5D (tzcyx in any axis order) tifs and OME-Zarr files can be loaded.
- Channel interface to define how to load data
- Replacing a pyramidal dataset with its higher-resolution version
- Accurate scale bars and boxes
- Loading of per-index label masks
- Lazy loading of giant files (no data is loaded in RAM outside what's rendered)
### [Get Started!](./tutorials/1_start.md)
<img src="https://github.com/aafkegros/MicroscopyNodes/blob/main/figures/newprettyside.png?raw=true" width="600"/>
*All icons used except the Microscopy Nodes icon were designed for Blender by [@jenzdrich](https://blenderartists.org/t/new-icons-for-blender-2-8/1112701) under [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/deed.en)* | Markdown |
3D | aafkegros/MicroscopyNodes | docs/clean_svg.py | .py | 1,084 | 32 | import xml.etree.ElementTree as ET
import re
from svgpathtools import parse_path, Path
import sys
def clean_and_scale_svg(input_path, output_path, scale=0.01):
    """Normalize an SVG file for inline embedding in documentation.

    Rewrites every ``fill`` attribute to ``currentColor`` so the icon
    inherits the surrounding text color, and strips the fixed ``width``/
    ``height`` attributes from the root element so the SVG scales with
    its container.

    Args:
        input_path: Path of the SVG file to read.
        output_path: Path the cleaned SVG is written to.
        scale: Currently unused; kept for backward compatibility with
            existing callers (the geometry-scaling step is disabled).
    """
    # Register the default namespace so ElementTree does not prefix
    # every tag with "ns0:" when serializing.
    ET.register_namespace('', "http://www.w3.org/2000/svg")
    tree = ET.parse(input_path)
    root = tree.getroot()
    # ".//*" yields descendants only, so the root element is never touched here.
    for elem in root.findall(".//*"):
        if 'fill' in elem.attrib:
            elem.attrib['fill'] = "currentColor"
    # Drop fixed dimensions; pop() avoids a KeyError when an attribute is absent.
    root.attrib.pop('width', None)
    root.attrib.pop('height', None)
    # root.attrib['width'] = str(float(root.attrib['width'])/100)
    # root.attrib['height'] = str(float(root.attrib['height'])/100)
    tree.write(output_path, encoding='utf-8', xml_declaration=False)
    print(f"Cleaned and scaled SVG saved to {output_path}")
if __name__ == '__main__':
    # CLI entry point: clean_scale_svg.py input.svg output.svg [scale]
    args = sys.argv[1:]
    if len(args) < 2:
        print("Usage: python clean_scale_svg.py input.svg output.svg [scale]")
    else:
        # An optional third argument overrides the default scale factor.
        factor = float(args[2]) if len(args) == 3 else 0.01
        clean_and_scale_svg(args[0], args[1], scale=factor)
| Python |
3D | aafkegros/MicroscopyNodes | docs/tutorials/preferences.md | .md | 1,219 | 24 | # Preferences / Customization
The {{ svg("microscopy_nodes") }} Microscopy Nodes addon has {{ svg("preferences") }} **Preferences** to allow for a custom experience and defaults.
You can find these under `Edit > Preferences > Add-ons > Microscopy Nodes`.

Here we get multiple options for defaults and settings:
- Default "Path"
> The cache path that is set if the [load option](./2_loading_data.md#5-extra-import-settings-optional) `Path` is selected
- Default "Temporary"
> The cache path that is generated when [load option](./2_loading_data.md#5-extra-import-settings-optional) `Temporary` is selected
- Default channels + channel number
> This defines the default settings for the [channel interface](./2_loading_data.md#4-set-channels) when a new dataset is loaded. If more channels are present in the data than defaults, the list revolves.
- Mesh density
> This sets how fine/coarse the geometries for labelmasks and surfaces are
- Invert Color
> Inverts all colormaps on load and replace
- Overwrite local files (debug)
> If reloading fails for some reason, this is useful to check, but is usually only used for development.
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/large_data.md | .md | 4,016 | 74 | # Large data
Microscopy data is often very large, {{ svg("microscopy_nodes") }} Microscopy Nodes has some strategies to deal with this. These depend on the size of the data, the shape of the data, and your computational resources (and skills). The key of this is working at a smaller scale, and then **reloading** to larger data.
Here it is relevant to distinguish between two types of large data:
- Time sequence/few channels (not loading all data in the same timeframe)
> This mostly has issues in storage, and is more a point of only loading the data you need at a certain point, but will always just work.
- Large data that's all concurrently loaded
> This becomes a bigger issue, especially with the current **4 GiB limit of EEVEE**.
!!! warning "EEVEE cannot handle large data"
EEVEE currently **cannot handle** [volumetric data over 4 GiB](https://projects.blender.org/blender/blender/issues/136263). This means that scaling up our data will be easier in Cycles. By default after loading, the render engine is set to Cycles.
**Cycles is only applied** to the view if the shading preview is set to {{ svg("shading_rendered", 'small-icon') }} Rendered preview.
There are a few strategies:
## Working at large scale
You can actually work at large scales, as long as you have enough memory and take care to work **only in Cycles**:
Some {{ svg("workspace") }} workspaces will **automatically** be in {{ svg("shading_texture") }} Material preview mode. It is convenient to first go through the different {{ svg("workspace") }} workspaces and switch them all to {{ svg("shading_rendered") }} Rendered preview.
## **Reloading**
The **reloading** workflow means that you first work on a smaller version of the data and later replace this with a larger one.
This is controlled mainly by the {{ svg("file_refresh") }} Reload field in the {{ svg("microscopy_nodes") }} Microscopy Nodes load panel:

This can be pointed to a previously loaded Microscopy Nodes [holder object](./3_objects.md#holder):

Which will make a new action **Reload** the data in the holder. This has two extra options:
- {{ svg("file") }} Overwrite data
> Changes out the underlying data (labelmasks are currently always regenerated)
- {{ svg("material") }} Overwrite settings
> Overwrites all settings from the load panel, but retains anything set by the user, this includes input location/transform, emission, and color
For reloading to deal with large data, it is usually best to reload only updating {{ svg("file") }} the underlying data to the higher resolution.
### Reloading on workstation or cluster
It is possible to make a project on a less capable computer, and then transfer it to a workstation or an HPC cluster. This can be done in multiple ways:
- Transferring with data
> Easiest to do with your data loaded 'With Project' in the [extra import settings](./2_loading_data.md#5-extra-import-settings-optional), then it is an easy matter of transferring the .blend file and the data folder next to it to this computer.
- Reloading from a GUI
- Reloading from the command line
### Reloading from the command line
To replace your data from the command line interface, you set up all the {{ svg("file_refresh") }} reloading and import settings in the GUI (this currently works best with {{ svg("material") }} Overwrite settings off) and run a headless python script that looks like this:
```
import bpy
bpy.ops.microscopynodes.load_background()
bpy.ops.wm.save_mainfile()
```
by running:
`/path/to/Blender/executable -b /path/to/blendfile.blend -P /path/to/reload_script.py`
This will then load the data according to the Microscopy Nodes settings, and resave the .blend file.
You can subsequently render headlessly as well, here you can follow the {{ svg('blender') }} [Blender documentation on this](https://docs.blender.org/manual/en/latest/advanced/command_line/render.html).
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/1_start.md | .md | 6,347 | 111 | # 1. First Use
## **Installing Microscopy Nodes**
{{ youtube("BFMX0Dk5rIw", 360, 200) }}
1. Open Blender.
2. Navigate to `Edit > Preferences`.
3. In the Add-ons tab, search for `Microscopy Nodes`.
4. Click **Install** to download and enable the add-on.
## **Blender Interface Overview**
The Blender interface is very flexible and can be reconfigured in many ways. While this is a powerful feature, it also means that explaining the basics can be a bit technical, and some of the terms are Blender **jargon**. To make things easier, here is a quick overview of some **key terms** and where to find common functions.
Further information and navigation can be found in the {{ svg("Blender") }} [Blender Manual](https://docs.blender.org/manual/en/latest/editors/3dview/navigate/index.html)

The Blender interface always contains:
1. {{ svg("topbar") }} **Top bar**: contains the main menus and selection of the tabs, or {{ svg("workspace") }} workspaces (e.g. Layout, Shading, Geometry Nodes).
2. {{ svg("workspace") }} **Workspace**: Reconfigurable workspace. Contains different areas depending on the selection in the {{ svg("topbar") }} topbar.
3. {{ svg("statusbar") }} **Status bar**: contains shortcuts suggestions
But it can be configured much more with **workspaces** {{ svg("workspace") }}. Currently we're in the **Layout** workspace.
## Layout Workspace
The **Layout** workspace {{ svg("workspace") }} (by default selected in the {{ svg("topbar") }} *topbar*) is our main workspace, made for assembling and seeing your 3D scene. This contains multiple elements with Blender-specific names:

1. {{ svg("view3d") }} **3D Viewport**: Main 3D interaction area.
1. {{ svg("outliner") }} **Outliner**: Tree view of all objects in the *scene*. This is the easiest place to *select* objects.
2. {{ svg("properties") }} **Properties Editor**: Edit properties of the scene and the selected object. Under {{ svg("scene_data") }} you can find *Microscopy Nodes*.
3. {{ svg("time") }} **Timeline**: For animation.
With Microscopy Nodes, we also use the [Shading]() workspace, and for advanced users, the [Geometry Nodes]() and [Scripting]() workspaces.
## The 3D Viewport
Annotated on the right in the image are widgets you can drag to **rotate** (axes), {{ svg("view_zoom") }} **scale** and {{ svg("view_pan") }} **move** the view.
Mouse navigation is possible and configurable in the {{ svg("blender") }} [Preferences](https://docs.blender.org/manual/en/latest/editors/preferences/input.html). This depends on which input device you use (2-button mouse, 3-button mouse, touchpad).
### The `View` menu
{: style="height:200px"}
At the top of the {{ svg("view3d") }} 3D viewport, there is a dropdown menu called `View` - this has shortcuts and other tools to align the view.
For example, if you lose all the objects in the scene, you can select an object in the {{ svg("outliner") }} outliner in the top right, and use the menu `View > Frame Selected` (or just `View > Frame All`) to see your scene again.
## The outliner

The {{ svg("outliner") }} outliner lists all {{ svg("outliner_collection") }} collections and objects in the scene. Here you can **select** objects more easily.
This also provides an interface for **visibility** in the {{ svg("hide_off") }}/{{ svg("hide_on") }} 3D viewport, and in the {{ svg("restrict_render_off") }}/{{ svg("restrict_render_on") }} final render. If objects are not visible, they are also not loaded into RAM, so it can speed up Blender to limit visibility.
## Manipulating Objects
Annotated on the left in the image are widgets you can drag to **select**, **move**, **rotate** and **scale** objects. The transform widgets spawn a *gizmo*: a mouse-clickable interaction interface:

Transforms can also be done with hotkeys: `G` for grab/move, `R` for rotate, `S` for scale. The transformation can be locked to an axis with the `X`, `Y` or `Z` key.
### Adding an object
At the top of the 3D viewport is an `Add` menu, from which you can add different primitive objects, such as a camera or lights. This is also findable under the key combination `Shift + A`
To add microscopy data, there is a [separate loading window](./2_loading_data.md).
### Deleting objects
You can select any object in the {{ svg("view3d") }} viewport or {{ svg("outliner") }} outliner, and delete it by `Right Mouse Button > Delete Object` or pressing `X` and confirming.
For deleting all objects in the scene, it is fastest to press `A` to select all objects and `X` to delete them.
In the {{ svg("outliner") }} **outliner**, an entire group can be deleted at once with `Right Mouse Button > Delete Hierarchy`
## **Viewport rendering**
In the top right of the viewport you can change the way the contents are shown.
{{ svg("microscopy_nodes") }} Microscopy Nodes volume data will only be visible in **Material Preview** and **Rendered** mode.

From left to right:
1. {{ svg("shading_wire") }} **Wireframe** : Only the object skeleton, *No volumetric data shown.*
2. {{ svg("shading_solid") }} **Solid Mode**: Only the external surfaces are drawn
3. {{ svg("shading_texture") }} **Material Preview**: Is meant for previewing your scene without full calculations. Defaults to [EEVEE](./rendering.md#eevee). May be a fast view, but will be slow to open with microscopy data, and is data-dependent.
4. {{ svg("shading_rendered") }} **Rendered**: Shows the scene as it will appear in the final render. By default, Microscopy Nodes sets this to be in [Cycles](./rendering.md#cycles). Often the best way to view microscopy data.
## **Further UI instruction (video)**
<div class="yt-lazy" data-id="enTid4aDC0Q" style="width:560px; height:315px;">
<div class="yt-thumbnail" style="background-image: url('https://img.youtube.com/vi/enTid4aDC0Q/hqdefault.jpg');">
<div class="yt-play-button"></div>
<div class="yt-overlay-text">
Click to load video from YouTube.
<br />
By clicking, you agree to YouTube’s privacy policy.
</div>
</div>
</div> | Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/rendering.md | .md | 4,343 | 75 | # Rendering
There are a lot of extra parameters that can be adjusted to optimize rendering in Blender. All of these are explained in the {{ svg("blender") }} [Blender manual](https://docs.blender.org/manual/en/latest/render/cycles/render_settings/index.html). Some, however, are especially useful to know for microscopy data or for new users. These are covered here.
## Render Engines
There are two major render engines in Blender. **EEVEE**, a [rasterized](https://en.wikipedia.org/wiki/Rasterisation) render engine, and **Cycles** a [ray-traced](https://en.wikipedia.org/wiki/Ray_tracing_(graphics)) engine.
### EEVEE
**EEVEE** is a render engine that is made to be fast, and powerful. It is less optimized for {{ svg("volume") }} volumetric data, especially for dense/scattering volumes. Currently, it can only handle volume data < 4 GiB.
However, it is still very strong, is usually able to combine semi-transparent masks and volumes, and is often faster, especially for rendering emissive time-lapses.
It may take longer to open an EEVEE interface with volumes, and for larger data it can be slower in general than Cycles.
### Cycles
**Cycles** is the ray-traced render engine, slower than EEVEE as it calculates light bouncing. This is best for scattering, and can be faster with bigger data.
## Render Settings
The {{ svg("scene") }} render settings can be found in the {{ svg("properties") }} properties.
### Samples
The number of samples is a metric for how much time the rendering algorithm takes to make an image. Lower samples mean more noise, but quicker render times, a high number of samples makes nicer images, but takes more time.
### Volume scattering (Cycles)
The amount of scattering in a volume is very important for the visualization of {{ svg("light") }} dense/scattering volumes. These are not as important in emissive {{ svg("outliner_ob_light") }} volumes.
This is only well-defined in a raytracer, so this is only available in Cycles. These can significantly affect performance.
This can be controlled with two parameters:
- Volumes > Max Steps
> The depth of traversal through the volume
- Light Paths > Max Bounces > Volume
> Total amount of volume scattering events
In addition, the `Volumes > Step Rate` can be changed, but only if artefacts show up. This can often also be helped by downscaling the volume (through the [holder](./3_objects.md#holder)).
### General scattering (Cycles)
The number of light bounces can be relevant also outside of the volumes. Especially for `Light Paths > Max Bounces > Transparent` and `Light Paths > Max Bounces > Total`. This is relevant to avoid black artefacting when overlaying many (semi-)transparent meshes.
### Transparent background
This can be found under Film > Transparent. Note that a transparent background can and will still be able to light a scene, if the {{ svg("world") }} background color is not black.
### Color Management
The `Color Management > View Transform` is set by default to `Standard` after loading with Microscopy Nodes, doing no postprocessing of colors in the final image. The default of Blender is `AgX`. This postprocesses the colors in the image to make them look more 'real'/'better', and has multiple **Looks** (high contrast, punchy etc) to choose from.
## Output settings
The {{ svg("output") }} output settings can be found in the {{ svg("properties") }} properties.
### Time
The timing of output can be changed. The **Frame Rate** for output videos and previews can be changed under `Format > Frame Rate`.
**Time Stretching**, under `Frame Range`, is the best way to offset Blender-frame rate from your volume's frame rate. Stretching time here allows e.g. more frames of camera movement per timeframe of your biological sample.
{{ youtube("jcERgoBI1b8", 280, 158) }}
### Output location and format
Under {{ svg("output") }} > Output, the output file location and format can be defined. Here it is useful to note that if the format is set to `PNG`, as default, you would still have to compile the frames to video later.
Optionally, you can set the output format to `FFMPEG Video`, which will output a full video when you render an animation. This does limit your capacity to edit the video encoding and you cannot stop the render e.g. halfway and still retain the first half of the output. | Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/2_loading_data.md | .md | 6,250 | 136 | # Loading microscopy data
## Video tutorials
The **Fluorescence** tutorial shows how to load *emissive* data, and the **EM** tutorial shows how to load *scattering data*, these settings can be good to interchange!
{{ youtube("lroStEHiPV8", 280, 158) }}
{{ youtube("Rwq7Tu8Avss", 280, 158) }}
The **labelmask/surface** tutorial is shown for EM data, but can be useful for any data type and binary or label masks:
{{ youtube("YO3FxTFGH00", 280, 158) }}
## 1. Point to your data
1. [Delete](./1_start.md#deleting-objects) everything in the scene with `A` and `X`
2. In the {{ svg("scene_data") }} Scene Properties panel, find the **{{ svg("microscopy_nodes") }} Microscopy Nodes** panel.
3. Provide the path to your data set:
> local TIFF file (preferably imagej-tif, but others work)
> OME-Zarr URL
> local OME-Zarr folder
For **local files**, you can use the file explorer {{ svg("file_folder") }}.
{{ svg("error") }} With OME-Zarr URLs/folders, **copy the address directly** into the field. OME-Zarr links are not clickable. If the metadata does not populate, check out our tips for [troubleshooting OME-Zarr](./ome_zarr_troubleshooting.md).
!!! example "Example OME-Zarr datasets:"
- [https://s3.embl.de/microscopynodes/RPE1_4x.zarr](https://s3.embl.de/microscopynodes/RPE1_4x.zarr) ; Showing expansion microscopy of an RPE1 cell with cytoskeletal elements stained
- [https://s3.embl.de/microscopynodes/FIBSEM_dino_masks.zarr](https://s3.embl.de/microscopynodes/FIBSEM_dino_masks.zarr) ; Showing a dinoflagellate FIB-SEM dataset with segmentations
- The [Image Data Resource OME-Zarr archive](https://idr.github.io/ome-ngff-samples/). Some may [not work](./ome_zarr_troubleshooting.md).
## 2. Select scale *(optional)*
Microscopy Nodes **automatically** selects the smallest scale of data available.
Downscaled versions get created if the data is over 4 GiB per timepoint. For OME-Zarr, all premade scales are also shown.

Any scale with a volume icon {{ svg("outliner_data_volume") }} will easily work in any part of Blender. The `1` icon is of a size where a single channel will definitely work. For larger datasets, check out the [large data tutorial](./large_data.md).
## 3. Check metadata
The metadata populates **automatically** from the file:

This contains:
- Pixel Sizes
> This may be truncated in the view, up to 6 decimal places are used.
- Pixel Units
> Å to m, or 'arbitrary unit (a.u.)'
- Axis order
> A piece of text such as 'tzcyx'. number of letters needs to match the number of axes. Allows remapping of axis order by editing the text field.
- Time (only if time axis exists)
> Start and end frame, allows you to clip the time axis before loading.
## 4. Set channels
Next we see the channel interface:

From left to right:
- Channel name (editable)
- Visualization types:
- Volume {{ svg("outliner_data_volume") }}
- Surface {{ svg("outliner_data_surface") }}
- Labelmask {{ svg("outliner_data_pointcloud") }}
- Emission on/off {{ svg("light") }}
- Colormap type:
- Single Color {{ svg("mesh_plane") }}
- Linear {{ svg("ipo_linear") }}
- Diverging {{ svg("lincurve") }}
- Categorical {{ svg("outliner_data_pointcloud") }}
- Color Picker ( if {{ svg("mesh_plane") }} )
The **Visualization type** defines which [objects](./3_objects.md) will be loaded. If **none** are clicked in a channel, this channel will not be loaded.
When loading with **Emission** on {{ svg("outliner_ob_light") }}, the objects of this channel will by default emit light. If this is off {{ svg("light") }}, they will reflect/scatter light from the scene or background.
The **Colormap** choice gives basic options for color before loading. If
{{ svg("mesh_plane") }} Single Color is picked, the colormap will be linearly black -> color picked in the color picker.
Defaults can be changed in the [preferences](./preferences.md).
!!! warning "Labelmasks"
Labelmasks {{ svg("outliner_data_pointcloud", "small-icon") }} expect an array with separate integer values per object. If it gets a data channel, it will try to still split it into separate objects
## 5. Extra import settings (optional)
These settings are below the `Load` button as they are not essential to remap for your first load. They can be useful to change if you're using Microscopy Nodes more often, or have specific needs. Most of these will persist between sessions.

This includes the **Data Storage** - where the intermediate files get stored
- Temporary (Default)
> Puts the data in a temporary file, you can check the temporary path in the [preferences](./preferences.md)
- Path
> Gives a field to put in a location.
- With Project
> Will create a folder next to the project location. Requires that the project is saved
{{ svg("world") }} overwrite the world color upon loading. This is useful as the world color (white, black or grey) is used as default lighting.
{{ svg("scene") }} overwrite [render settings](./rendering.md) upon loading. This turns itself off after the first load, to avoid overwriting custom settings.
{{ svg("con_sizelike") }} defines the **input transform** - Blender works in meters, but *Microscopy Nodes* uses this as multiple optional coordinate spaces:
- `px -> cm`
> Default, scales the object such that each pixel takes 1 cm space in XY. Scales the Z axis such that it is isotropic with XY.
- `Å -> m`
- `nm -> m`
- `µm -> m`
- `mm -> m`
- `m -> m`
- `nm -> cm (Molecular Nodes)`
{{ svg("orientation_parent") }} defines the **input location**:
- `XY Center`
- `XYZ Center`
- `Origin`
!!! warning "Choosing an input transform"
Note that you may need to go one scale higher than you expect with the {{ svg("con_sizelike", "small-icon") }} input transform, as a few meters is already quite large (the default cube is 2 m). The normal unit is the size of your dataset, and not always the unit of your pixel size.
## 6. Load
Press the big `Load` button to load a dataset
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/ome_zarr_troubleshooting.md | .md | 1,750 | 26 | # OME-Zarr troubleshooting
[OME-Zarr](https://ngff.openmicroscopy.org/about/index.html) is a developing standard and is very flexible, which sometimes makes it hard to read and write, and no software supports all features.
{{ svg("microscopy_nodes") }} Microscopy Nodes supports OME-Zarr **up to version 0.5**, to load single, up to 5-dimensional, arrays.
!!! tip "Is your OME-Zarr not loading?"
A quick option is to append `/0` after your path. Some OME-Zarr writers create a **group** at the .zarr address, with the first (and often, only) image at .zarr/0
## OME-Zarr collections
There is currently **no support** for any form of **self-discovering** collections from OME-Zarr, this can cause issues for:
- Wells
- Fields
- Labels
- Large Zarr-groups (such as OpenOrganelle datasets)
- Bioformats2raw export
All of these images **can still be opened**, by pointing to the **specific path** of the array, which contains the different OME 'multiscales'.
!!! example "Pointing to a specific path"
For example, for the dataset [https://uk1s3.embassy.ebi.ac.uk/idr/zarr/v0.3/idr0052A/5514375.zarr](https://uk1s3.embassy.ebi.ac.uk/idr/zarr/v0.3/idr0052A/5514375.zarr) of the IDR, the labels are at [https://uk1s3.embassy.ebi.ac.uk/idr/zarr/v0.3/idr0052A/5514375.zarr/labels/Cell](https://uk1s3.embassy.ebi.ac.uk/idr/zarr/v0.3/idr0052A/5514375.zarr/labels/Cell) and [https://uk1s3.embassy.ebi.ac.uk/idr/zarr/v0.3/idr0052A/5514375.zarr/labels/Chromosomes](https://uk1s3.embassy.ebi.ac.uk/idr/zarr/v0.3/idr0052A/5514375.zarr/labels/Chromosomes). These are not auto-discovered, but can be loaded with the specific paths.
Support for collections will come in the future, but this will wait for the planned reorganization OME-Zarr collection structure.
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/5_creating_ouput.md | .md | 3,948 | 68 | # Creating output
Creating output from a scene in Blender is done by adding a {{ svg("view_camera") }} camera and pressing `Render > Render Image` or `Render > Render Animation` for the full animation. This writes images or movies to your [output folder](./rendering.md#output-location-and-format).
{{ youtube("jcERgoBI1b8", 360, 200) }}
## Adding and moving the camera
Adding a camera can be done from the {{ svg("view3d") }} 3D viewport by pressing `Add` or `Shift + A`, and finding the {{ svg("view_camera") }} camera in the submenu.
The camera can be moved as any object but has a few extra features.
### Active Camera
The active camera is the camera in the scene that will be used in rendering, and can be switched and selected through the green {{ svg("view_camera") }} camera icon in the {{ svg("outliner") }} outliner.

### Viewing through the active camera
The view through the current **active** {{ svg("view_camera") }} **camera** can be seen by clicking the {{ svg("view_camera") }} camera icon on the right of the {{ svg("view3d") }} 3D viewport.
### Aligning view
The **easiest** way to set the camera direction is by aligning it to the view. This means:
- Setting a nice view angle and position
- Aligning the camera with `View > Align View > Align Active Camera To View`
### Camera properties
Camera properties can be found under the {{ svg("view_camera") }} camera icon in the {{ svg("properties") }} properties. This is where the camera can be switched between **perspective** and **orthographic** modes, or the focal length set for a perspective camera.
!!! note "Perspective and orthographic cameras"
Perspective cameras are the default of Blender, and often the type of camera we are most used to in daily life. These cameras have a virtual lens, providing a perspective transform, making distant objects appear smaller than near objects.
In contrast to this, orthographic cameras show all objects as the same size, no matter the distance to the camera. This thus can be better for comparing scales of objects in data, although it is less intuitive to viewers.
## Setting up lighting
Lighting can be very important for getting a good feeling of depth for the data, but it is not always necessary or essential.
In {{ svg("outliner_ob_light") }} emissive renders, it is often not necessary to set up light.
In {{ svg("light") }} non-emissive renders, the Microscopy Nodes default is to use the {{ svg("world") }} world color and brightness to light the scene. However, setting up specific lighting adds depth to renders.
You can set up lights by adding them from the `Add` menu, as objects, moving them and changing their `Power` and `Color` in the {{ svg("properties") }} properties.
## Basic animation
Animation can be very intricate, but the basics and getting started can be straightforward. The way this works is by setting {{ svg("keyframe") }} keyframes. These are a set value at a set timepoint, if this value is different at another keyframe, Blender will interpolate between the two points.
The nice part of Blender is that almost any value can be keyframed. You can set a keyframe by hovering over a value and pressing `I`.
So a simple camera animation setup takes:
- Set the camera
- Set the {{ svg("time") }} timeline at timepoint 0
- Hover over the camera `Location` and `Rotation` in the {{ svg("properties") }} properties and press `I`, setting a keyframe.
- Set the {{ svg("time") }} timeline at timepoint 100
- Set the camera to a new position
- Again hover over the camera `Location` and `Rotation` in the {{ svg("properties") }} properties and press `I`, setting a keyframe.
Then in between the two keyframes, Blender will interpolate the camera positioning.
## Rendering
Rendering can be done with `Render > Render Image` or `Render > Render Animation`. It can be useful to still check out the [render settings](./rendering.md) | Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/3_objects.md | .md | 5,223 | 115 | # 3. Objects
Microscopy Nodes loads your microscopy data as different types of **objects**, depending on how you loaded each channel.

Each type of object is placed in a {{ svg("outliner_ob_empty") }} **holder** collection. The **Axes** and **Slice Cube** are always present.
You can select an object by clicking on it in the {{ svg("outliner") }} outliner (as shown in the screenshot) and change its properties:
- Change **underlying data** in the {{ svg("modifier") }} modifier menu of the {{ svg("properties") }} properties or the (*advanced*) Geometry Nodes workspace {{ svg("workspace") }}
- Change **visualization** in the {{ svg("material") }} material menu of the {{ svg("properties") }} properties or the Shader Nodes workspace {{ svg("workspace") }}
The exact settings and where to change them change per object, so see below.
---
## Holder
The {{ svg("outliner_ob_empty") }} **Holder** is an empty object which is the `parent` of the other Microscopy Nodes objects.
The holder can be **scaled**, **moved** and **rotated** and then **all of its objects** will be transformed along with it.
## Axes
The {{ svg("outliner_ob_mesh") }} **Axes** object is always loaded with your dataset. It draws a **scale grid** based on the number of pixels, pixel size, and pixel unit.
- {{ svg("modifier") }} Geometry options
- `pixel unit` per tick
> The distance between grid lines
- Grid
> Whether to draw a grid or only a box
- Line thickness
> Thickness of lines in arbitrary units
- Frontface culling
> If ticked, clips out the axes that are closest to the camera or viewpoint, so that they do not obstruct the view.
- Separate planes
> For each plane (xy bottom, top etc) you can select whether they will be drawn
- {{ svg("material") }} Shader options
- Color
Scale grids can be **moved**, **scaled** and **rotated** independently of the holder without losing their accuracy.
!!! note "Bars versus grids"
In *Microscopy Nodes*, only scale grids are shown. Blender’s default cameras are perspective cameras, where traditional scale bars are not very meaningful. We'll probably add support for some form of scale bar in the future for orthographic renders.
---
## Volumes
The {{ svg("outliner_ob_volume") }} **Volume** holds channels of **volumetric** data, which can be rendered either as emitting or scattering light. It is generated when you enable {{ svg("outliner_ob_volume") }} **Volume** during loading.
- {{ svg("modifier") }} Geometry options
- Included channels
> If channels are not included, they are also not loaded into RAM
- {{ svg("material") }} [Shader options](./4_shading.md#volume-shading)
- Pixel intensities
- Opacity calculation
- Color LUT
The easiest way to edit a volume shader is in the {{ svg("workspace") }} Shader Nodes workspace, where you can most easily switch between channels in the {{ svg("properties") }} properties.
You can toggle between emission and scattering modes using the {{ svg("light") }} emission toggle in [loading](./2_loading_data.md).
---
## Surfaces
The {{ svg("outliner_ob_mesh") }} **Surface** object is a mesh extracted from a volume using an **isosurface threshold**. It is generated when you enable {{ svg("outliner_ob_surface") }} **Surface** during loading.
- {{ svg("modifier") }} Geometry options
- Included channels
- Threshold
> The intensity value above which the surface is extracted.
- Voxel size *(only listed if {{ svg("preferences") }} [Mesh Resolution](./preferences.md) is not `Actual`)*
> Interactive scalable unit for mesh detail
- {{ svg("material") }} [Shader options](./4_shading.md#surface-shading)
- Standard mesh shading parameters (color, opacity etc)
---
## Label Masks
The {{ svg("outliner_ob_mesh") }} **Label Mask** object is a mesh generated from a **label image**, such as a segmentation channel. It is generated when you enable {{ svg("outliner_ob_pointcloud") }} **Labelmask** during loading.
**Each value** in the volume is turned into a separate mesh.
- {{ svg("modifier") }} Geometry options
- Included channels
- {{ svg("material") }} [Shader options](./4_shading.md#labelmask-shading)
- Color per label
- Revolving colormap or linearly distributed among objects
- Standard mesh shading parameters (color, opacity etc)
---
## Slice Cube
The {{ svg("outliner_ob_mesh") }} **Slice Cube** is a movable object that defines the visibility of other objects.
The slice cube is inherently nothing else than a Cube with a transparent shader. The linkage to its transparency is done from the {{ svg("material") }} shader **of the sliced object**. This means you can also add a new cube and point to this instead.
This has no {{ svg("modifier") }} Geometry options or {{ svg("material") }} Shader options
---
???+ info "How the **Microscopy Nodes** objects work"
The data objects are Geometry Nodes objects that reference preloaded data stored in the `cache` collection. In the **Geometry Nodes** workspace {{ svg("workspace", "small-icon") }} you can edit the loaded data and add modifiers.
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/surface_smoothing.md | .md | 1,060 | 26 | # Surface modification
After loading a {{ svg("outliner_data_pointcloud") }} labelmask or {{ svg("outliner_data_surface") }} surface, the geometry is often still quite jagged.
This can be edited through the following techniques:
- changing the **mesh density** in the [preferences](./preferences.md) and reloading
- adding smoothing modifiers
- editing the mesh using sculpting or modeling
## Adding modifiers
Modifiers can be added under the {{ svg("modifier") }} modifiers in the {{ svg("properties") }} properties, under the `+ Add Modifier` button.
Useful smoothing modifiers are:
- {{ svg("mod_subsurf") }} Surface Subdivision
- {{ svg("mod_smooth") }} Smooth
- {{ svg("mod_smooth") }} Smooth Corrective
- {{ svg("mod_smooth") }} Smooth by Laplacian
Especially {{ svg("mod_subsurf") }} Surface Subdivision is useful, although this can create too many vertices (which you could then again destroy with something such as a {{ svg("mod_decim") }} Decimate modifier)
These methods will distort your geometry, so use only in cases where you can allow this.
| Markdown |
3D | aafkegros/MicroscopyNodes | docs/tutorials/4_shading.md | .md | 9,255 | 150 | # Shading
**Shading** encompasses the visualization of Blender's objects. The shading options can be found in two places:
- in the {{ svg("workspace") }} Shader Nodes workspace, find this in the {{ svg("topbar") }} topbar.
- in the {{ svg("material") }} material tab of the {{ svg("properties") }} properties.
These two locations contain the same information, laid out in different ways.
The default Microscopy Nodes shaders are built from {{ svg("nodetree") }} nodes, and contains information on how the object interacts with **light** and its transparency. The defaults are listed here separately for the different [Microscopy Nodes data-objects](./3_objects.md).
## Volume Shading
The Shader Nodes workspace {{ svg("workspace") }} when selecting a Microscopy Nodes {{ svg("outliner_data_volume") }} volume:
{: style="height:130px"}
### Data Loading

This is where the data gets read out from the vdb grid (as handed over from the Geometry Nodes) and gets normalized. You will usually not need to edit this.
??? warning "Reusing shaders"
The normalization that is done in **Normalize Data** is dependent on the specific data, as it rescales the min and max value of the data to 0 and 1 - after it's already transformed to small floating point values for saving to .vdb files.
Essentially, this means its best to **keep the normalization** of new data when you replace the rest.

### Pixel Intensities
The pixel intensities rescale the min and max value, and thus the linear interpolation of the data. This is analogous to a Fiji **Brightness & Contrast** window.
You can move the two handles to move the **min** and **max**.

??? note "How this works"
This is a Blender `Color Ramp` that only outputs Alpha, and not Color. We feed in normalized data between 0 and 1 (as represented in histogram) and map this to the color ramp. The color ramp is two nodes of alpha 0 (min) and 1 (max).
This also means you can add extra nodes in here if you want nonlinearity in your pixel intensities, or flip the nodes to invert. However, it is often easier to just change the colormap.
### Color LUT
{: style="height:200px"}
{: style="height:200px"}
The lookup tables are `Color Ramp` objects, LUTs can be edited:
- **Editing** handles
- You can drag to change its position and click on it to get a color picker. To change contrast, its recommended to change the *pixel intensities* instead of the color.
- The bottom fields are the *index*, *position* and *color* of the selected field - allowing editing of the handles with more precision
- **Replacing** the LUT by {{ svg("mouse_rmb") }} right clicking the LUT and selecting {{ svg("color") }} LUTs. This lists multiple [colormaps](https://cmap-docs.readthedocs.io).
- {{ svg("ipo_linear") }} Sequential, monotonic rising or falling, often good for microscopy
- {{ svg("lincurve") }} Diverging, distinctive middle of the colormap
- {{ svg("mesh_circle") }} Cyclical, start and end together
- {{ svg("outliner_data_pointcloud") }} Qualitative, separates consecutive values, good for labelmasks
- {{ svg("add") }} Miscellaneous
- {{ svg("mesh_plane") }} Single Color, gives a new black-to-white colormap, to easily edit LUTs
- {{ svg("arrow_leftright") }} Flipping the LUT
- either under the down-carrot or under {{ svg("mouse_rmb") }} right clicking the LUT
- Flipped LUTs can be [loaded by default](./preferences.md)
### Opacity

The transparency window describes the total contribution of each voxel to the image. If you are in an emission mode, this defines the volume **brightness**, in scattering mode, this describes the volume **density**.
Here there are multiple options:
- Clip Min
- Sets all values at 0 as transparent (left from the **min** in *Pixel Intensities*).
- Clip Max
- Sets all values at 1 to transparent (right from the **max** in *Pixel Intensities*).
- Alpha Baseline
- Constant alpha for all voxels that are not *Clipped*.
- Alpha Multiplier
- Alpha value that multiplies the input values, and thus linearly increases with intensity. Does not affect *Clipped* values. Adds onto *Alpha Baseline*.
### Shaders (emission/scatter)
This is where the *Microscopy Nodes* pre-processing hooks into the default Blender volume interfaces. This is split between an {{ svg("outliner_ob_light") }} emissive and {{ svg("light") }} scattering setup. Currently the easiest way to switch between them is through [reloading](./2_loading_data.md).
{: style="height:200px"}
{: style="height:200px"}
??? note "Advanced"
Some things are editable in here, such as the **Anisotropy** of the scattering, which defines whether there is more backward scattering (less penetrant) or more forward scattering.
Additionally, by Adding nodes (from the `Add` menu or `Shift + A`), and connecting these together, it's possible to make combined setups for emissive and scattering shaders.
### Slice Cube

The Slice Cube section allows slicing of the volume. This has an {{ svg("object_data") }} Object pointer to a cube in the scene (by default the loaded slice cube).
The object bounding box gets fed into the slicer, which sets all regions outside the bounding box to transparent.
??? note "How this works"
As shown if you press the {{ svg("nodetree", "small-icon") }} icon at the top right of the group, how the slicing node works is to take the remapped locations as the **Texture Coordinate** input provides (mapping the data to the coordinates of the cube space) and compare these to the bounds (1, -1). If positions are not in the range of the cube space, the shader is set to a *Transparent Shader*.
## Surface shading
The {{ svg("outliner_data_surface") }} Surface object shader is more simple than the volumetric, as it can only have **one color**, although it can have many properties. The shader does not explicitly load the data, as the data interaction is all done through the threshold in the {{ svg("modifier") }} Geometry options.

### Color LUT

The color lookup table works similar to the [volume color LUT](#color-lut). However, the surface can only display one value, so the `Fac` value defines where along the lookup table the color is drawn from.
For a regular **color picker** you can leave the `Fac` at `1` and click the rightmost handle. The other way would be to replace this box with a color box (`Add > Input > RGB`)
### Mesh shading
{: style="height:350px"}
The **Principled BSDF** node is a combined node that combines features to create different material properties. The [Blender manual](https://docs.blender.org/manual/en/latest/render/shader_nodes/shader/principled.html) {{ svg("blender") }} gives a complete manual to its features.
By default this has two inputs set differently from Blender default, the **Base Color** and **Emission Color/Strength**. These colors are set to link to the Color LUT.
The **Emission Strength** is set to 0 or 0.5 depending on whether this was loaded with {{ svg("outliner_ob_light") }} emission on or {{ svg("light") }} emission off. This is done for consistency, and that dark scenes have masks and surfaces as clearly visible as data, without setting up lighting.
??? warning "Emission can 'flatten' objects"
The feeling of **depth** in 3D rendering is often due to the interaction of objects with light. When things are emitting light themselves, they can often look flat. For more feeling of depth, it might be better to load with {{ svg("light", "small-icon") }} emission off, and set up some form of lighting.
### Slice cube
See [volume Slice Cube](#slice-cube).
## Labelmask shading
The {{ svg("outliner_data_pointcloud") }} label mask shader is very similar to the {{ svg("outliner_data_surface") }} Surface shader, but is able to read out and use the `object id` to color by.

### Object ID handling

The `object id` (the value in the label mask) is connected and retrievable from the vertices of the labelmask objects.
This is led into a group that maps it to values between 0 and 1 for the LUT. This has the option of a **revolving sequence**: ideal for a categorical colormap with distinct values, making the object ids loop through these values. Or a **consecutive sequence** scaling all values linearly along the `object id`s, ideal for linear colormaps.
### Color LUT
The color lookup table works similar to the [volume color LUT](#color-lut). Often categorical colormaps work best for labelmasks, if you have only one channel of masks.
| Markdown |
3D | aafkegros/MicroscopyNodes | tests/test_load_types.py | .py | 648 | 24 | from .utils import *
import pytest
loadable = [['volume'],['surface'],['labelmask'], [], ['volume', 'surface'], 'mixed']
@pytest.mark.parametrize('load_as', loadable)
@pytest.mark.parametrize('arrtype', ['5D_5cube', '2D_5x10', '5D_nonrect'])
def test_loading_types(arrtype, load_as):
    # Load each synthetic test array with every combination of output object
    # types. 'mixed' assigns a different load type per channel by cycling
    # through the first four entries of `loadable`, exercising heterogeneous
    # loads in a single run.
    prep_load(arrtype)
    for ch in bpy.context.scene.MiN_channelList:
        ch['volume'] = False  # clear the default so only the requested types load
        load_ch_as = load_as
        if load_as == 'mixed':
            load_ch_as = loadable[ch['ix'] % 4]
        for setting in load_ch_as:
            ch[setting] = True
    ch_dicts = do_load()
    check_channels(ch_dicts, test_render=True)
    return
| Python |
3D | aafkegros/MicroscopyNodes | tests/__init__.py | .py | 0 | 0 | null | Python |
3D | aafkegros/MicroscopyNodes | tests/utils.py | .py | 5,266 | 158 | import os
os.environ["MIN_TEST"] = "1"
import bpy
import yaml
from microscopynodes.handle_blender_structs import *
from microscopynodes.file_to_array import *
from microscopynodes.load_components import *
import microscopynodes
import numpy as np
import pytest
import tifffile
import platform
import imageio.v3 as iio
from pathlib import Path
import dask.array as da
test_folder = Path(os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp_test_data"))
test_folder.mkdir(exist_ok=True)
print('imported utils')
def len_axis(dim, axes_order, shape):
    """Return the extent of axis *dim* (e.g. 'c') in *shape*, or 1 if absent.

    *axes_order* is a string such as 'tzcyx' naming each dimension of *shape*.
    """
    position = axes_order.find(dim)
    return shape[position] if position != -1 else 1
def take_index(imgdata, indices, dim, axes_order):
    """Select *indices* along the axis named *dim*; pass through if absent.

    *axes_order* maps axis names to positions; when *dim* is not present the
    dask array *imgdata* is returned unchanged.
    """
    axis = axes_order.find(dim)
    if axis == -1:
        return imgdata
    return da.take(imgdata, indices=indices, axis=axis)
def make_tif(path, arrtype):
    """Write a synthetic ImageJ-style TIFF test image to *path*.

    *arrtype* selects the array shape:
      - '5D_5cube':   5x5x5x5x5, axes TZCYX
      - '2D_5x10':    5x10, axes YX
      - '5D_nonrect': 2x3x4x5x6, axes TZCYX
    Voxel values ramp 0..11 repeating (kept small so every array is also
    usable as a labelmask).

    Returns (path, array, lowercase axes string).
    """
    axes = "TZCYX"
    if arrtype == '5D_5cube':
        arr = np.ones((5, 5, 5, 5, 5), dtype=np.uint16)
    if arrtype == '2D_5x10':
        arr = np.ones((5, 10), dtype=np.uint16)
        axes = "YX"
    if arrtype == '5D_nonrect':
        arr = np.ones(tuple(range(2, 7)), dtype=np.uint16)
    # Vectorized fill: replaces a per-element Python loop over every voxel
    # (flatten / assign ix % 12 / reshape) with a single numpy expression.
    arr = (np.arange(arr.size, dtype=np.uint16) % 12).reshape(arr.shape)
    tifffile.imwrite(path, arr, metadata={"axes": axes}, imagej=True)
    return path, arr, axes.lower()
def prep_load(arrtype=None):
    """Reset Blender to an empty scene and point Microscopy Nodes at a test TIFF.

    Writes a preferences yaml (based on the repo template, with the cache
    redirected into the temp test folder), generates the test image via
    make_tif, and sets it as the input file. Defaults to the '5D_5cube' array.
    """
    # microscopynodes._test_register()
    bpy.ops.wm.read_factory_settings(use_empty=True)
    pref_template = str(Path(test_folder).parent / "test_preferences_template.yaml")
    with open(pref_template) as f:
        prefdct = yaml.safe_load(f)
    prefdct['cache_path'] = str(test_folder)
    pref_path = test_folder / 'pref.yaml'
    with open(pref_path, 'w') as f:
        yaml.safe_dump(prefdct, f)
    bpy.context.scene.MiN_yaml_preferences = str(pref_path)
    if arrtype is None:
        arrtype = '5D_5cube'
    path = test_folder / f'{arrtype}.tif'
    path, arr, axes_order = make_tif(path, arrtype)
    # bpy.context.scene.MiN_selected_cache_option = "Path"
    # bpy.context.scene.MiN_explicit_cache_dir = str(test_folder)
    # bpy.context.scene.MiN_cache_dir = str(test_folder)
    bpy.context.scene.MiN_input_file = str(path)
    # assert(arr_shape() == arr.shape)
    # Setting MiN_input_file triggers channel discovery; verify the channel
    # list matches the 'c' axis length of the generated array.
    assert(len(bpy.context.scene.MiN_channelList) == len_axis('c', axes_order, arr.shape))
    return
def do_load():
    """Run the full Microscopy Nodes load pipeline and return the channel dicts."""
    params = microscopynodes.parse_inputs.parse_initial()
    # if platform.system() == 'Linux':
    bpy.context.scene.MiN_remake = True  # force regeneration of cached data
    params = microscopynodes.load.load_threaded(params)
    microscopynodes.load.load_blocking(params)
    # params is a tuple; the first element holds the per-channel dicts.
    return params[0]
def check_channels(ch_dicts, test_render=True):
    """Verify that every requested channel object was created and is visible.

    First pass: assert each enabled channel exists on its object and switch
    all channels off. Second pass (render check only): toggle each channel's
    own switch on and assert that the render actually changes.
    """
    img1 = None
    objs = microscopynodes.load.parse_reload(bpy.data.objects[str(Path(bpy.context.scene.MiN_input_file).stem)])
    if test_render:
        # Sanity check the render comparison itself: hiding the axes object
        # must produce a different image.
        img1 = quick_render('1')
        objs[min_keys.AXES].hide_render = True
        img2 = quick_render('2')
        objs[min_keys.AXES].hide_render = False
        assert(not np.array_equal(img1, img2))
    for ch in ch_dicts:
        for min_type in [min_keys.SURFACE, min_keys.VOLUME, min_keys.LABELMASK]:
            if ch[min_type]:
                if objs[min_type] is None:
                    raise ValueError(f"{min_type} not in objs, while setting is {ch[min_type]}")
                ch_obj = ChannelObjectFactory(min_type, objs[min_type])
                assert(ch_obj.ch_present(ch))
                socket = get_socket(ch_obj.node_group, ch, min_type="SWITCH")
                ch_obj.gn_mod[socket.identifier] = False
    for ch in ch_dicts:
        for min_type in [min_keys.SURFACE, min_keys.VOLUME, min_keys.LABELMASK]:
            if ch[min_type] and test_render:
                # BUGFIX: previously this loop reused `ch_obj` and `socket`
                # left over from the LAST iteration of the first pass, so
                # every render toggled the same (wrong) channel switch.
                # Re-resolve the object and socket for the current channel.
                ch_obj = ChannelObjectFactory(min_type, objs[min_type])
                socket = get_socket(ch_obj.node_group, ch, min_type="SWITCH")
                img1 = quick_render('1')
                ch_obj.gn_mod[socket.identifier] = True
                img2 = quick_render('2')
                ch_obj.gn_mod[socket.identifier] = False
                if np.array_equal(img1, img2):
                    raise ValueError(f"{socket}, ")
                assert(not np.array_equal(img1, img2))
def quick_render(name):
    """Render the current scene at low quality and return the image as an array.

    Creates a fresh camera, renders a 128x128 PNG (16 cycles samples) to the
    temp test folder as tmp{name}.png, and returns the decoded pixels so
    callers can compare renders with np.array_equal.
    """
    bpy.context.scene.cycles.samples = 16
    # Set the output file path
    output_file = str(test_folder / f'tmp{name}.png')
    scn = bpy.context.scene
    cam1 = bpy.data.cameras.new("Camera 1")
    cam1.lens = 40
    cam_obj1 = bpy.data.objects.new("Camera 1", cam1)
    cam_obj1.location = (.1, .1, .2)
    cam_obj1.rotation_euler = (0.7, 0, 2.3)
    scn.collection.objects.link(cam_obj1)
    bpy.context.scene.camera = cam_obj1
    # Set the viewport resolution
    bpy.context.scene.render.resolution_x = 128
    bpy.context.scene.render.resolution_y = 128
    # Set the output format
    bpy.context.scene.render.image_settings.file_format = "PNG"
    # Render the viewport and save the result
    bpy.ops.render.render()
    bpy.data.images["Render Result"].save_render(output_file)
    data = np.array(iio.imread(output_file))
    # os.remove(output_file)
    return data
| Python |
3D | aafkegros/MicroscopyNodes | tests/conftest.py | .py | 1,168 | 42 | import pytest
import bpy
import microscopynodes
import shutil, os
import gc, time
microscopynodes._test_register()
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
    """Tear down Blender state after the whole test session.

    Deleting the update-callback properties before interpreter shutdown is
    regrettably necessary: pytest segfaults if properties with callback
    functions stay alive.
    """
    import microscopynodes
    UPDATE_PROPS = [
        'MiN_input_file',
        'MiN_axes_order',
        'MiN_channel_nr',
        'MiN_reload',
    ]
    deleted = 0
    for prop in UPDATE_PROPS:
        try:
            delattr(bpy.types.Scene, prop)
            deleted += 1
        # Narrowed from a bare `except:` so unrelated errors (and
        # KeyboardInterrupt/SystemExit) are no longer swallowed.
        except AttributeError:
            print(f"{prop} not found")
    try:
        microscopynodes.unregister()
    except Exception as e:
        print(f"Warning during unregister: {e}")
    # Remove the temp test-data folder; gc + a short sleep helps Windows
    # release lingering file handles before the delete.
    test_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp_test_data")
    try:
        if os.path.isdir(test_folder):
            gc.collect()
            time.sleep(0.1)  # give Windows a moment
            shutil.rmtree(test_folder, ignore_errors=True)
    except Exception as e:
        print(e)
3D | aafkegros/MicroscopyNodes | tests/test_zarr_reload.py | .py | 2,055 | 56 | from .utils import *
import pytest
@pytest.mark.parametrize('level', [None, 0, 1, 2])
def test_zarr(level):
    """Load the 5D test zarr at each resolution level (None = default choice)."""
    prep_load()
    bpy.context.scene.MiN_input_file = str(Path(test_folder).parent / 'test_data' / '5D_5cube.zarr')
    # Idiom fix: `level is not None` replaces the awkward `not level is None`.
    if level is not None:
        bpy.context.scene.MiN_selected_array_option = str(bpy.context.scene.MiN_array_options[level].identifier)
    for ch in bpy.context.scene.MiN_channelList:
        ch['volume'] = True
        ch['surface'] = True
    ch_dicts = do_load()
    check_channels(ch_dicts, test_render=False)
    return
@pytest.mark.parametrize('which_not_update', [['MiN_update_data','MiN_update_settings'], ['MiN_update_data'], ['MiN_update_settings'], []])
def test_reload(which_not_update):
    """Load a zarr, then reload with changed settings at another resolution.

    *which_not_update* lists the scene flags to disable before the reload, so
    all four combinations of data-update / settings-update are exercised.
    """
    prep_load()
    bpy.context.scene.MiN_input_file = str(Path(test_folder).parent / 'test_data' / '5D_5cube.zarr')
    # First load at the lowest-resolution level.
    bpy.context.scene.MiN_selected_array_option = str(len(bpy.context.scene.MiN_array_options)-1)
    ch_dicts1 = do_load()
    objects1 = set([obj.name for obj in bpy.data.objects])
    # Point the reload at the previously loaded holder object.
    bpy.context.scene.MiN_reload = bpy.data.objects[str(Path(bpy.context.scene.MiN_input_file).stem)]
    for setting in which_not_update:
        bpy.context.scene[setting] = False
    # Flip the requested objects: surfaces for all channels, volume only ch 0.
    for ch in bpy.context.scene.MiN_channelList:
        ch['volume'] = False
        ch['surface'] = True
    bpy.context.scene.MiN_channelList[0]['volume'] = True
    # Reload at one level higher resolution.
    bpy.context.scene.MiN_selected_array_option = str(len(bpy.context.scene.MiN_array_options) -2)
    ch_dicts2 = do_load()
    objects2 = set([obj.name for obj in bpy.data.objects])
    if bpy.context.scene.MiN_update_data:
        assert(len(objects1 - objects2) == 1) # only old data was deleted
        assert(len(objects2 - objects1) == 5 + 1) # new data (n channels) and surfaces were added
    else:
        # surfaces were not created, so should not be checked
        for ch in ch_dicts2:
            ch[min_keys.SURFACE] = 0
    if bpy.context.scene.MiN_update_settings:
        check_channels(ch_dicts2, test_render=False)
| Python |
3D | ZhangLingMing1/TSGCNet | train.py | .py | 4,585 | 115 | from dataloader import plydataset
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import time
import numpy as np
import os
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from pathlib import Path
import torch.nn.functional as F
import datetime
import logging
from utils import test_semseg
from TSGCNet import TSGCNet
import random
if __name__ == "__main__":
    # Train TSGCNet for tooth-mesh segmentation on two GPUs via DataParallel.
    os.environ["CUDA_VISIBLE_DEVICES"] = '0, 1'
    """-------------------------- parameters --------------------------------------"""
    batch_size = 2
    k = 32  # number of kNN neighbours used by the graph layers
    """--------------------------- create Folder ----------------------------------"""
    experiment_dir = Path('./experiment/')
    experiment_dir.mkdir(exist_ok=True)
    # NOTE(review): current_time is computed but never used — confirm whether
    # the experiment folder was meant to be timestamped instead of 'maiqi'.
    current_time = str(datetime.datetime.now().strftime('%m-%d_%H-%M'))
    file_dir = Path(str(experiment_dir) + '/maiqi')
    file_dir.mkdir(exist_ok=True)
    log_dir, checkpoints = file_dir.joinpath('logs/'), file_dir.joinpath('checkpoints')
    log_dir.mkdir(exist_ok=True)
    checkpoints.mkdir(exist_ok=True)
    formatter = logging.Formatter('%(name)s - %(message)s')
    logger = logging.getLogger("all")
    logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler(str(log_dir) + '/log.txt')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    writer = SummaryWriter(file_dir.joinpath('tensorboard'))
    """-------------------------------- Dataloader --------------------------------"""
    train_dataset = plydataset("data/train")
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    test_dataset = plydataset("data/test")
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=1)
    """--------------------------- Build Network and optimizer----------------------"""
    # 12 input channels per stream (coordinates / normals), 8 output classes.
    model = TSGCNet(in_channels=12, output_channels=8, k=k)
    model = torch.nn.DataParallel(model, device_ids=[0,1])
    model.cuda()
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=1e-3,
        betas=(0.9, 0.999),
        weight_decay=1e-5
    )
    # Halve the learning rate every 20 epochs (floored by LEARNING_RATE_CLIP below).
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    """------------------------------------- train --------------------------------"""
    logger.info("------------------train------------------")
    best_acc = 0
    LEARNING_RATE_CLIP = 1e-5
    his_loss = []  # running losses; cleared only at each 10-epoch evaluation
    his_smotth = []
    # NOTE(review): class_weights is defined but never used — confirm whether
    # it was meant to be passed to F.nll_loss as the `weight` argument.
    class_weights = torch.ones(15).cuda()
    for epoch in range(0, 200):
        # NOTE(review): scheduler.step() at the top of the epoch (before any
        # optimizer.step()) triggers an ordering warning on PyTorch >= 1.1 —
        # confirm intended.
        scheduler.step()
        lr = max(optimizer.param_groups[0]['lr'], LEARNING_RATE_CLIP)
        optimizer.param_groups[0]['lr'] = lr
        for i, data in tqdm(enumerate(train_loader, 0), total=len(train_loader), smoothing=0.9):
            _, points_face, label_face, label_face_onehot, name, _ = data
            coordinate = points_face.transpose(2,1)
            coordinate, label_face = Variable(coordinate.float()), Variable(label_face.long())
            label_face_onehot = Variable(label_face_onehot)
            coordinate, label_face, label_face_onehot = coordinate.cuda(), label_face.cuda(), label_face_onehot.cuda()
            optimizer.zero_grad()
            pred = model(coordinate)
            # Flatten batch and faces so NLL loss sees per-face predictions.
            label_face = label_face.view(-1, 1)[:, 0]
            pred = pred.contiguous().view(-1, 8)
            loss = F.nll_loss(pred, label_face)
            loss.backward()
            optimizer.step()
            his_loss.append(loss.cpu().data.numpy())
        if epoch % 10 == 0:
            # Evaluate and (on improvement) checkpoint every 10 epochs; the
            # reported loss is the mean over the last 10-epoch window.
            print('Learning rate: %f' % (lr))
            print("loss: %f" % (np.mean(his_loss)))
            writer.add_scalar("loss", np.mean(his_loss), epoch)
            metrics, mIoU, cat_iou = test_semseg(model, test_loader, num_classes=8)
            print("Epoch %d, accuracy= %f, mIoU= %f " % (epoch, metrics['accuracy'], mIoU))
            logger.info("Epoch: %d, accuracy= %f, mIoU= %f loss= %f" % (epoch, metrics['accuracy'], mIoU, np.mean(his_loss)))
            writer.add_scalar("accuracy", metrics['accuracy'], epoch)
            if (metrics['accuracy'] > best_acc):
                best_acc = metrics['accuracy']
                print("best accuracy: %f best mIoU :%f" % (best_acc, mIoU))
                print(cat_iou)
                torch.save(model.state_dict(), '%s/coordinate_%d_%f.pth' % (checkpoints, epoch, best_acc))
                # NOTE(review): best_pth is assigned but never used afterwards.
                best_pth = '%s/coordinate_%d_%f.pth' % (checkpoints, epoch, best_acc)
                logger.info(cat_iou)
            his_loss.clear()
    writer.close()
| Python |
3D | ZhangLingMing1/TSGCNet | TSGCNet.py | .py | 10,340 | 293 | import os
import sys
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torch.autograd import Variable
def knn(x, k):
    """Indices of the k nearest neighbours of each point, excluding itself.

    x: [batch, dims, num_points]. Returns a LongTensor [batch, num_points, k].
    """
    squared = torch.sum(x ** 2, dim=1, keepdim=True)
    cross = torch.matmul(x.transpose(2, 1), x)
    # Negative squared pairwise distance: ||a-b||^2 = |a|^2 - 2ab + |b|^2.
    neg_dist = 2 * cross - squared - squared.transpose(2, 1)
    # Largest k+1 values = smallest k+1 distances; rank 0 is the point itself.
    return neg_dist.topk(k=k + 1, dim=-1)[1][:, :, 1:]
def index_points(points, idx):
    """Gather points by index.

    Input:
        points: input points data, [B, N, C]
        idx: sample index data, [B, S]
    Return:
        new_points:, indexed points data, [B, S, C]
    """
    batch = points.shape[0]
    # Batch indices broadcast to idx's shape via view + expand (no copy).
    expand_shape = (-1,) + (1,) * (idx.dim() - 1)
    batch_indices = torch.arange(
        batch, dtype=torch.long, device=points.device
    ).view(expand_shape).expand_as(idx)
    return points[batch_indices, idx, :]
class STNkd(nn.Module):
    """Feature transform network (k x k spatial transformer, as in PointNet).

    Predicts a k x k transformation matrix from the input features; the
    identity is added so the network starts near an identity transform.
    """
    def __init__(self, k=64):
        super(STNkd, self).__init__()
        self.conv1 = torch.nn.Conv1d(k, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k * k)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.k = k

    def forward(self, x):
        # x: [B, k, N] -> returns [B, k, k] transform matrices.
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = torch.max(x, 2, keepdim=True)[0]  # global max-pool over points
        x = x.view(-1, 1024)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # Bias towards the identity. torch.eye(..., device=x.device) replaces
        # the deprecated Variable + numpy construction and avoids building the
        # identity on CPU and copying it to the GPU on every forward pass.
        iden = torch.eye(self.k, device=x.device, dtype=x.dtype).flatten().view(1, self.k * self.k).repeat(batchsize, 1)
        x = x + iden
        x = x.view(-1, self.k, self.k)
        return x
def get_graph_feature(coor, nor, k=10):
    """Build kNN edge features for the coordinate and normal streams.

    coor, nor: [B, C, N]. The kNN graph is computed on *coor* only and shared
    by both streams. For each point, each neighbour feature is concatenated
    with the centre feature, giving tensors of shape [B, 2*C, N, k].

    Returns (coor_feature, nor_feature, index) where index is the raw kNN
    index tensor [B, N, k].
    """
    batch_size, num_dims, num_points = coor.shape
    coor = coor.view(batch_size, -1, num_points)
    idx = knn(coor, k=k)
    index = idx
    # BUGFIX: derive the device from the input instead of hard-coding 'cuda',
    # so the function also works on CPU tensors.
    device = coor.device
    # Offset per-batch indices so the flattened [B*N, C] gather below is valid.
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = coor.size()
    _, num_dims2, _ = nor.size()
    coor = coor.transpose(2,1).contiguous()
    nor = nor.transpose(2,1).contiguous()
    # coordinate
    coor_feature = coor.view(batch_size * num_points, -1)[idx, :]
    coor_feature = coor_feature.view(batch_size, num_points, k, num_dims)
    coor = coor.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    coor_feature = torch.cat((coor_feature, coor), dim=3).permute(0, 3, 1, 2).contiguous()
    # normal vector
    nor_feature = nor.view(batch_size * num_points, -1)[idx, :]
    nor_feature = nor_feature.view(batch_size, num_points, k, num_dims2)
    nor = nor.view(batch_size, num_points, 1, num_dims2).repeat(1, 1, k, 1)
    nor_feature = torch.cat((nor_feature, nor), dim=3).permute(0, 3, 1, 2).contiguous()
    return coor_feature, nor_feature, index
class GraphAttention(nn.Module):
    """Attention-weighted aggregation over each point's K graph neighbours.

    Attention logits are computed from [centre - neighbour, neighbour] feature
    pairs, softmaxed over the neighbour axis, and used to weight-sum the
    neighbour features.
    """
    def __init__(self,feature_dim,out_dim, K):
        super(GraphAttention, self).__init__()
        # NOTE(review): self.dropout is assigned but never used in forward().
        self.dropout = 0.6
        self.conv = nn.Sequential(nn.Conv2d(feature_dim * 2, out_dim, kernel_size=1, bias=False),
                                  nn.BatchNorm2d(out_dim),
                                  nn.LeakyReLU(negative_slope=0.2))
        self.K=K

    def forward(self, Graph_index, x, feature):
        # Graph_index: [B, N, K] kNN indices; x: [B, C, N] raw features;
        # feature: [B, D, N, K] edge features from get_graph_feature.
        B, C, N = x.shape
        x = x.contiguous().view(B, N, C)
        feature = feature.permute(0,2,3,1)
        # Gather each point's K neighbour features: [B, N, K, C].
        neighbor_feature = index_points(x, Graph_index)
        centre = x.view(B, N, 1, C).expand(B, N, self.K, C)
        # Pair (centre - neighbour) difference with the neighbour itself.
        delta_f = torch.cat([centre-neighbor_feature, neighbor_feature], dim=3).permute(0,3,2,1)
        e = self.conv(delta_f)
        e = e.permute(0,3,2,1)
        attention = F.softmax(e, dim=2)  # [B, npoint, nsample,D]
        # Attention-weighted sum over neighbours -> [B, D, N].
        graph_feature = torch.sum(torch.mul(attention, feature),dim = 2) .permute(0,2,1)
        return graph_feature
class TSGCNet(nn.Module):
    """Two-Stream Graph Convolutional Network for tooth mesh segmentation.

    One stream processes per-face coordinates (12 channels), the other
    per-face normals (12 channels). Each stream is aligned by a feature
    transform (STNkd) and passed through three graph layers; the streams are
    fused with channel-wise attention before the per-face classifier, which
    outputs log-probabilities of shape [B, N, output_channels].
    """
    def __init__(self, k=16, in_channels=12, output_channels=8):
        super(TSGCNet, self).__init__()
        self.k = k
        ''' coordinate stream '''
        self.bn1_c = nn.BatchNorm2d(64)
        self.bn2_c = nn.BatchNorm2d(128)
        self.bn3_c = nn.BatchNorm2d(256)
        self.bn4_c = nn.BatchNorm1d(512)
        self.conv1_c = nn.Sequential(nn.Conv2d(in_channels*2, 64, kernel_size=1, bias=False),
                                     self.bn1_c,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.conv2_c = nn.Sequential(nn.Conv2d(64*2, 128, kernel_size=1, bias=False),
                                     self.bn2_c,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.conv3_c = nn.Sequential(nn.Conv2d(128*2, 256, kernel_size=1, bias=False),
                                     self.bn3_c,
                                     nn.LeakyReLU(negative_slope=0.2))
        # 448 = 64 + 128 + 256 concatenated multi-scale features.
        self.conv4_c = nn.Sequential(nn.Conv1d(448, 512, kernel_size=1, bias=False),
                                     self.bn4_c,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.attention_layer1_c = GraphAttention(feature_dim=12, out_dim=64, K=self.k)
        self.attention_layer2_c = GraphAttention(feature_dim=64, out_dim=128, K=self.k)
        self.attention_layer3_c = GraphAttention(feature_dim=128, out_dim=256, K=self.k)
        self.FTM_c1 = STNkd(k=12)
        ''' normal stream '''
        self.bn1_n = nn.BatchNorm2d(64)
        self.bn2_n = nn.BatchNorm2d(128)
        self.bn3_n = nn.BatchNorm2d(256)
        self.bn4_n = nn.BatchNorm1d(512)
        self.conv1_n = nn.Sequential(nn.Conv2d((in_channels)*2, 64, kernel_size=1, bias=False),
                                     self.bn1_n,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.conv2_n = nn.Sequential(nn.Conv2d(64*2, 128, kernel_size=1, bias=False),
                                     self.bn2_n,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.conv3_n = nn.Sequential(nn.Conv2d(128*2, 256, kernel_size=1, bias=False),
                                     self.bn3_n,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.conv4_n = nn.Sequential(nn.Conv1d(448, 512, kernel_size=1, bias=False),
                                     self.bn4_n,
                                     nn.LeakyReLU(negative_slope=0.2))
        self.FTM_n1 = STNkd(k=12)
        '''feature-wise attention'''
        self.fa = nn.Sequential(nn.Conv1d(1024, 1024, kernel_size=1, bias=False),
                                nn.BatchNorm1d(1024),
                                nn.LeakyReLU(0.2))
        ''' feature fusion '''
        self.pred1 = nn.Sequential(nn.Conv1d(1024, 512, kernel_size=1, bias=False),
                                   nn.BatchNorm1d(512),
                                   nn.LeakyReLU(negative_slope=0.2))
        self.pred2 = nn.Sequential(nn.Conv1d(512, 256, kernel_size=1, bias=False),
                                   nn.BatchNorm1d(256),
                                   nn.LeakyReLU(negative_slope=0.2))
        self.pred3 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=False),
                                   nn.BatchNorm1d(128),
                                   nn.LeakyReLU(negative_slope=0.2))
        self.pred4 = nn.Sequential(nn.Conv1d(128, output_channels, kernel_size=1, bias=False))
        self.dp1 = nn.Dropout(p=0.6)
        self.dp2 = nn.Dropout(p=0.6)
        self.dp3 = nn.Dropout(p=0.6)

    def forward(self, x):
        # x: [B, 24, N]; first 12 channels are coordinates, last 12 normals.
        coor = x[:, :12, :]
        nor = x[:, 12:, :]
        # Align each stream with its learned 12x12 feature transform.
        trans_c = self.FTM_c1(coor)
        coor = coor.transpose(2, 1)
        coor = torch.bmm(coor, trans_c)
        coor = coor.transpose(2, 1)
        trans_n = self.FTM_n1(nor)
        nor = nor.transpose(2, 1)
        nor = torch.bmm(nor, trans_n)
        nor = nor.transpose(2, 1)
        # Three graph layers: coordinate stream uses graph attention,
        # normal stream uses max-pooling over neighbours.
        coor1, nor1, index = get_graph_feature(coor, nor, k=self.k)
        coor1 = self.conv1_c(coor1)
        nor1 = self.conv1_n(nor1)
        coor1 = self.attention_layer1_c(index, coor, coor1)
        nor1 = nor1.max(dim=-1, keepdim=False)[0]
        coor2, nor2, index = get_graph_feature(coor1, nor1, k=self.k)
        coor2 = self.conv2_c(coor2)
        nor2 = self.conv2_n(nor2)
        coor2 = self.attention_layer2_c(index, coor1, coor2)
        nor2 = nor2.max(dim=-1, keepdim=False)[0]
        coor3, nor3, index = get_graph_feature(coor2, nor2, k=self.k)
        coor3 = self.conv3_c(coor3)
        nor3 = self.conv3_n(nor3)
        coor3 = self.attention_layer3_c(index, coor2, coor3)
        nor3 = nor3.max(dim=-1, keepdim=False)[0]
        # Multi-scale concatenation per stream, then project to 512 channels.
        coor = torch.cat((coor1, coor2, coor3), dim=1)
        coor = self.conv4_c(coor)
        nor = torch.cat((nor1, nor2, nor3), dim=1)
        nor = self.conv4_n(nor)
        # Per-point relative weighting between the two streams.
        avgSum_coor = coor.sum(1)/512
        avgSum_nor = nor.sum(1)/512
        avgSum = avgSum_coor+avgSum_nor
        # BUGFIX: was .reshape(1,1,16000), which hard-coded batch size 1 and
        # exactly 16000 faces; unsqueeze keeps the actual [B, 1, N] shape so
        # the weights broadcast correctly for any batch size / mesh size.
        weight_coor = (avgSum_coor / avgSum).unsqueeze(1)
        weight_nor = (avgSum_nor / avgSum).unsqueeze(1)
        x = torch.cat((coor*weight_coor, nor*weight_nor), dim=1)
        # Channel-wise attention over the fused 1024-channel features.
        weight = self.fa(x)
        x = weight*x
        # Classifier head. BUGFIX: dropout results were previously discarded
        # (`self.dp1(x)` without assignment), so dropout was never applied
        # during training; assign the outputs so it takes effect.
        x = self.pred1(x)
        x = self.dp1(x)
        x = self.pred2(x)
        x = self.dp2(x)
        x = self.pred3(x)
        x = self.dp3(x)
        score = self.pred4(x)
        score = F.log_softmax(score, dim=1)
        score = score.permute(0, 2, 1)
        return score
if __name__ == "__main__":
    # Smoke test: push one random batch through the network on GPU 0.
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # input size: [batch_size, C, N], where C is number of dimension, N is the number of mesh.
    dummy = torch.rand(1,24,16000).cuda()
    net = TSGCNet(in_channels=12, output_channels=8, k=32).cuda()
    print(net(dummy).shape)
| Python |
3D | ZhangLingMing1/TSGCNet | utils.py | .py | 4,415 | 118 | # *_*coding:utf-8 *_*
import os
import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm
from collections import defaultdict
import pandas as pd
from dataloader import generate_plyfile, plydataset
def compute_cat_iou(pred, target, iou_tabel):
    """Accumulate per-category IoU over a batch.

    pred: [B, N, C] class scores (torch tensor); target: [B, N] labels.
    iou_tabel accumulates the IoU sum in column 0 and the sample count in
    column 1, one row per category. Returns the updated table and the list
    of individual IoU values computed for this batch.
    """
    ious = []
    target_np = target.cpu().data.numpy()
    for sample_pred, sample_target in zip(pred, target_np):
        # predicted label per point = argmax over the class axis
        choice = sample_pred.data.max(1)[1].cpu().data.numpy()
        for cat in np.unique(sample_target):
            pred_mask = choice == cat
            gt_mask = sample_target == cat
            union = np.sum(pred_mask | gt_mask)
            # empty union -> count the category as perfectly matched
            iou = 1 if union == 0 else np.sum(pred_mask & gt_mask) / float(union)
            iou_tabel[cat, 0] += iou
            iou_tabel[cat, 1] += 1
            ious.append(iou)
    return iou_tabel, ious
def compute_overall_iou(pred, target, num_classes):
    """Mean IoU over all classes for each shape in the batch.

    Args:
        pred: [B, N, C] class scores (torch tensor).
        target: [B, N] integer labels (torch tensor).
        num_classes: number of classes C.
    Returns:
        List of length B with the per-shape mean IoU.
    """
    shape_ious = []
    pred_np = pred.cpu().data.numpy()
    target_np = target.cpu().data.numpy()
    for shape_idx in range(pred.size(0)):
        # FIX: use argmax(1) to get the predicted label per point; the
        # original used max(1), which returns the maximum *score* and was
        # then incorrectly compared against class indices.
        pred_labels = pred_np[shape_idx].argmax(1)
        part_ious = []
        for part in range(num_classes):
            I = np.sum(np.logical_and(pred_labels == part, target_np[shape_idx] == part))
            U = np.sum(np.logical_or(pred_labels == part, target_np[shape_idx] == part))
            if U == 0:
                iou = 1  # If the union of groundtruth and prediction points is empty, then count part IoU as 1
            else:
                iou = I / float(U)
            part_ious.append(iou)
        shape_ious.append(np.mean(part_ious))
    return shape_ious
def test_semseg(model, loader, num_classes = 8, gpu=True, generate_ply=False):
    '''
    Evaluate a semantic segmentation model over a dataloader.
    Input
    :param model: network called as model(coordinate) -> [B, N, num_classes] log-probs
    :param loader: yields (index, points, label_face, label_face_onehot, name, raw_points_face)
    :param num_classes: number of segmentation classes
    :param gpu: unused here; tensors are always moved to CUDA  # NOTE(review): confirm intent
    :param generate_ply: if True, write colored prediction meshes to pred_global/
    Output
    metrics: metrics['accuracy']-> overall accuracy
             metrics['iou']-> mean Iou
    hist_acc: history of accuracy
    cat_iou: IoU for o category
    '''
    # columns: [iou_sum, sample_count, mean_iou]
    iou_tabel = np.zeros((num_classes,3))
    metrics = defaultdict(lambda:list())
    hist_acc = []
    for batch_id, (index, points, label_face, label_face_onehot, name, raw_points_face) in tqdm(enumerate(loader), total=len(loader), smoothing=0.9):
        batchsize, num_point, _ = points.size()
        # un-normalized face data of the first (only) sample, used for ply export
        points_face = raw_points_face[0].numpy()
        index_face = index[0].numpy()
        coordinate = points.transpose(2,1)
        # slices assume the 24-channel face layout built in dataloader.get_data
        normal = points[:, :, 12:]
        centre = points[:, :, 9:12]
        label_face = label_face[:, :, 0]
        coordinate, label_face, centre = Variable(coordinate.float()), Variable(label_face.long()), Variable(centre.float())
        coordinate, label_face, centre = coordinate.cuda(), label_face.cuda(), centre.cuda()
        with torch.no_grad():
            pred = model(coordinate)
        # accumulate per-category IoU before flattening
        iou_tabel, iou_list = compute_cat_iou(pred,label_face,iou_tabel)
        pred = pred.contiguous().view(-1, num_classes)
        label_face = label_face.view(-1, 1)[:, 0]
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(label_face.data).cpu().sum()
        metrics['accuracy'].append(correct.item()/ (batchsize * num_point))
        # reuse label_face to hold the *predicted* labels for export
        label_face = pred_choice.cpu().reshape(pred_choice.shape[0], 1)
        if generate_ply:
            #label_face=label_optimization(index_face, label_face)
            generate_plyfile(index_face, points_face, label_face, path=("pred_global/%s") % name)
    # mean IoU per category = accumulated sum / count
    iou_tabel[:,2] = iou_tabel[:,0] /iou_tabel[:,1]
    hist_acc += metrics['accuracy']
    metrics['accuracy'] = np.mean(metrics['accuracy'])
    metrics['iou'] = np.mean(iou_tabel[:, 2])
    iou_tabel = pd.DataFrame(iou_tabel,columns=['iou','count','mean_iou'])
    iou_tabel['Category_IOU'] = ["label%d"%(i) for i in range(num_classes)]
    cat_iou = iou_tabel.groupby('Category_IOU')['mean_iou'].mean()
    mIoU = np.mean(cat_iou)
    return metrics, mIoU, cat_iou
| Python |
3D | ZhangLingMing1/TSGCNet | dataloader.py | .py | 7,188 | 164 | from plyfile import PlyData
import numpy as np
from torch.utils.data import DataLoader,Dataset,random_split
import os
import pandas as pd
labels = ((255, 255, 255), (255, 0, 0), (255, 125, 0),(255, 255, 0), (0, 255, 0), (0, 255, 255),
(0, 0, 255), (255, 0, 255))
def get_data(path=""):
    """Read a labeled ASCII .ply mesh and build per-face feature rows.

    Returns (index_face [N,3], points_face [N,24], label_face [N,1],
    label_face_onehot [N,8], points). Face labels are decoded from the
    face RGB colors using the palette below.
    """
    # local palette shadows the module-level `labels` tuple on purpose
    labels = ([255,255,255], [255, 0, 0], [255, 125, 0], [255, 255, 0], [0, 255, 0], [0, 255, 255],
              [0, 0, 255], [255, 0, 255])
    row_data = PlyData.read(path) # read ply file
    points = np.array(pd.DataFrame(row_data.elements[0].data))
    faces = np.array(pd.DataFrame(row_data.elements[1].data))
    n_face = faces.shape[0] # number of faces
    xyz = points[:, :3] # coordinate of vertex shape=[N, 3]
    normal = points[:, 3:] # normal of vertex shape=[N, 3]
    label_face = np.zeros([n_face,1]).astype('int32')
    label_face_onehot = np.zeros([n_face,8]).astype(('int32'))
    """ index of faces shape=[N, 3] """
    index_face = np.concatenate((faces[:, 0]), axis=0).reshape(n_face, 3)
    """ RGB of faces shape=[N, 3] """
    RGB_face = faces[:, 1:4]
    """ coordinate of 3 vertexes shape=[N, 9] """
    xyz_face = np.concatenate((xyz[index_face[:, 0], :], xyz[index_face[:, 1], :],xyz[index_face[:, 2], :]), axis=1)
    """ normal of 3 vertexes shape=[N, 9] """
    normal_vertex = np.concatenate((normal[index_face[:, 0], :], normal[index_face[:, 1], :],normal[index_face[:, 2], :]), axis=1)
    normal_face = faces[:, 5:]
    x1, y1, z1 = xyz_face[:, 0], xyz_face[:, 1], xyz_face[:, 2]
    x2, y2, z2 = xyz_face[:, 3], xyz_face[:, 4], xyz_face[:, 5]
    x3, y3, z3 = xyz_face[:, 6], xyz_face[:, 7], xyz_face[:, 8]
    # barycentre of each triangular face
    x_centre = (x1 + x2 + x3) / 3
    y_centre = (y1 + y2 + y3) / 3
    z_centre = (z1 + z2 + z3) / 3
    centre_face = np.concatenate((x_centre.reshape(n_face,1),y_centre.reshape(n_face,1),z_centre.reshape(n_face,1)), axis=1)
    """ get points of each face, concat all of above, shape=[N, 24]"""
    points_face = np.concatenate((xyz_face, centre_face, normal_vertex, normal_face), axis=1).astype('float32')
    """ get label of each face """
    # a face whose color matches palette entry i gets label i
    for i, label in enumerate(labels):
        label_face[(RGB_face == label).all(axis=1)] = i
        label_face_onehot[(RGB_face == label).all(axis=1), i] = 1
    return index_face, points_face, label_face, label_face_onehot, points
def generate_plyfile(index_face, point_face, label_face, path= " "):
    """
    Write a colored ASCII .ply mesh from per-face data.
    Input:
        index_face: index of points in a face [N, 3]
        point_face: per-face feature rows; columns 0:9 are the 3 vertex
                    coordinates, 9:12 the centre, 12:21 the vertex normals
        label_face: label of face [N, 1]; mapped to RGB via the module-level
                    `labels` palette
        path: path to save new generated ply file
    Return:
        None
    """
    unique_index = np.unique(index_face.flatten())  # unique vertex indices
    # flag[i] = (already written?, new index) for original vertex i
    flag = np.zeros([unique_index.max()+1, 2]).astype('uint64')
    order = 0
    # NOTE(review): mode "a" assumes `path` does not already exist; an
    # existing file would end up with a duplicated header.
    with open(path, "a") as f:
        f.write("ply\n")
        f.write("format ascii 1.0\n")
        f.write("comment VCGLIB generated\n")
        f.write("element vertex " + str(unique_index.shape[0]) + "\n")
        f.write("property float x\n")
        f.write("property float y\n")
        f.write("property float z\n")
        f.write("property float nx\n")
        f.write("property float ny\n")
        f.write("property float nz\n")
        f.write("element face " + str(index_face.shape[0]) + "\n")
        f.write("property list uchar int vertex_indices\n")
        f.write("property uchar red\n")
        f.write("property uchar green\n")
        f.write("property uchar blue\n")
        f.write("property uchar alpha\n")
        f.write("end_header\n")
        for i, index in enumerate(index_face):
            for j, data in enumerate(index):
                if flag[data, 0] == 0:  # if this point has not been wrote
                    xyz = point_face[i, 3*j:3*(j+1)]  # Get coordinate
                    xyz_nor = point_face[i, 3*(j+3):3*(j+4)]  # vertex normal
                    f.write(str(xyz[0]) + " " + str(xyz[1]) + " " + str(xyz[2]) + " " + str(xyz_nor[0]) + " "
                            + str(xyz_nor[1]) + " " + str(xyz_nor[2]) + "\n")
                    flag[data, 0] = 1  # this point has been wrote
                    flag[data, 1] = order  # give point a new index
                    order = order + 1  # index add 1 for next point
        for i, data in enumerate(index_face):  # write new point index for every face
            # FIX: was `labels_change_color`, an undefined name (NameError);
            # the RGB palette is the module-level `labels` tuple.
            RGB = labels[label_face[i, 0]]  # Get RGB value according to face label
            f.write(str(3) + " " + str(int(flag[data[0], 1])) + " " + str(int(flag[data[1], 1])) + " "
                    + str(int(flag[data[2], 1])) + " " + str(RGB[0]) + " " + str(RGB[1]) + " "
                    + str(RGB[2]) + " " + str(255) + "\n")
class plydataset(Dataset):
    """Dataset of labeled .ply meshes; each item is one normalized mesh."""

    def __init__(self, path="data/train"):
        # every file in `path` is treated as one sample
        self.root_path = path
        self.file_list = os.listdir(path)

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, item):
        """Load one mesh, center it, and normalize its 24 face channels."""
        read_path = os.path.join(self.root_path, self.file_list[item])
        index_face, points_face, label_face, label_face_onehot, points = get_data(path=read_path)
        # keep an untouched copy for ply export at evaluation time
        raw_points_face = points_face.copy()
        # move all mesh to origin
        centre = points_face[:, 9:12].mean(axis=0)
        points_face[:, 0:3] -= centre
        points_face[:, 3:6] -= centre
        points_face[:, 6:9] -= centre
        # recompute the face centres from the shifted vertices
        points_face[:, 9:12] = (points_face[:, 0:3] + points_face[:, 3:6] + points_face[:, 6:9]) / 3
        points[:, :3] -= centre
        # global scale factor (note: shadows the builtin `max`)
        max = points.max()
        points_face[:, :12] = points_face[:, :12] / max
        # normalized data
        maxs = points[:, :3].max(axis=0)
        mins = points[:, :3].min(axis=0)
        means = points[:, :3].mean(axis=0)
        stds = points[:, :3].std(axis=0)
        nmeans = points[:, 3:].mean(axis=0)
        nstds = points[:, 3:].std(axis=0)
        nmeans_f = points_face[:, 21:].mean(axis=0)
        nstds_f = points_face[:, 21:].std(axis=0)
        for i in range(3):
            #normalize coordinate
            points_face[:, i] = (points_face[:, i] - means[i]) / stds[i] # point 1
            points_face[:, i + 3] = (points_face[:, i + 3] - means[i]) / stds[i] # point 2
            points_face[:, i + 6] = (points_face[:, i + 6] - means[i]) / stds[i] # point 3
            points_face[:, i + 9] = (points_face[:, i + 9] - mins[i]) / (maxs[i] - mins[i]) # centre
            #normalize normal vector
            points_face[:, i + 12] = (points_face[:, i + 12] - nmeans[i]) / nstds[i] # normal1
            points_face[:, i + 15] = (points_face[:, i + 15] - nmeans[i]) / nstds[i] # normal2
            points_face[:, i + 18] = (points_face[:, i + 18] - nmeans[i]) / nstds[i] # normal3
            points_face[:, i + 21] = (points_face[:, i + 21] - nmeans_f[i]) / nstds_f[i] # face normal
        return index_face, points_face, label_face, label_face_onehot, self.file_list[item], raw_points_face
# module is meant to be imported; nothing to run standalone
if __name__ == "__main__":
    print(" ")
| Python |
3D | llien30/point_cloud_anomaly_detection | train.py | .py | 4,094 | 156 | import argparse
import os
import random
import numpy as np
import torch
import wandb
import yaml
from addict import Dict
# from emd.emd_module import emdModule
from libs.checkpoint import save_checkpoint
from libs.dataset import ShapeNeth5pyDataset
from libs.foldingnet import SkipValiationalFoldingNet
from libs.helper import train_variational_foldingnet
from torch.utils.data import DataLoader
def get_parameters():
    """Build the training CLI parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description="take config file path")
    arg_parser.add_argument(
        "config", type=str, help="path of a config file for training"
    )
    arg_parser.add_argument(
        "--no_wandb", action="store_true", help="Add --no_wandb option"
    )
    return arg_parser.parse_args()
def main():
    """Train the variational FoldingNet; config comes from a YAML file."""
    args = get_parameters()
    # configuration
    with open(args.config, "r") as f:
        config_dict = yaml.safe_load(f)
    CONFIG = Dict(config_dict)
    print(config_dict)
    assert CONFIG.reconstruction_loss in [
        "CD",
        "EMD",
    ], "reconstruction loss must be CD(Chamfer Distance) or EMD(Earth mover's Distance)"
    if not args.no_wandb:
        wandb.init(
            config=CONFIG,
            name=CONFIG.name,
            project="ICIP2021",
        )
    torch.autograd.set_detect_anomaly(True)
    # deterministic cuDNN for reproducibility
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    def set_seed(seed):
        # seed every RNG the pipeline touches
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        os.environ["PYTHONHASHSEED"] = str(seed)

    set_seed(0)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_dataset = ShapeNeth5pyDataset(
        root_path=CONFIG.root_path,
        split="train",
        normal_class=CONFIG.normal_class,
        abnormal_class=[],
        n_point=CONFIG.n_points,
        random_rotate=CONFIG.rotate,
        random_jitter=CONFIG.jitter,
        random_translate=CONFIG.translate,
    )
    train_dataloader = DataLoader(
        train_dataset, batch_size=CONFIG.batch_size, shuffle=True
    )
    model = SkipValiationalFoldingNet(CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape)
    model.to(device)
    # learning rate depends on the reconstruction loss in use
    if CONFIG.reconstruction_loss == "CD":
        # lr = 0.0001 * 100 / CONFIG.batch_size
        lr = 0.001
    elif CONFIG.reconstruction_loss == "EMD":
        lr = 0.0001 * 16 / CONFIG.batch_size
    beta1, beta2 = 0.9, 0.999
    optimizer = torch.optim.Adam(
        model.parameters(), lr, [beta1, beta2], weight_decay=1e-6
    )
    if not args.no_wandb:
        # Magic
        wandb.watch(model, log="all")
    print("---------- Start training ----------")
    for epoch in range(CONFIG.num_epochs):
        (
            epoch_loss,
            epoch_inner_loss,
            epoch_out_loss,
            epoch_kld_loss,
            epoch_fake_kld_loss,
            epoch_time,
        ) = train_variational_foldingnet(
            train_dataloader,
            model,
            CONFIG.reconstruction_loss,
            optimizer,
            CONFIG.weight,
            epoch,
            device,
            CONFIG.save_dir,
        )
        print(
            f"inner_loss:{epoch_inner_loss} || out_loss:{epoch_out_loss} || kld_loss:{epoch_kld_loss}"
        )
        if not os.path.exists(os.path.join(CONFIG.save_dir, "checkpoint")):
            os.makedirs(os.path.join(CONFIG.save_dir, "checkpoint"))
        save_checkpoint(
            os.path.join(CONFIG.save_dir, "checkpoint"),
            epoch,
            model,
            optimizer,
        )
        print(f"epoch{epoch} || LOSS:{epoch_loss} | time:{epoch_time}")
        # FIX: wandb.log was called unconditionally while wandb.init is
        # guarded by --no_wandb, crashing the no-wandb path; guard it too.
        if not args.no_wandb:
            wandb.log(
                {
                    "train_time": epoch_time,
                    "loss": epoch_loss,
                    "inner_loss": epoch_inner_loss,
                    "out_loss": epoch_out_loss,
                    "kld_loss": epoch_kld_loss,
                    "fake_kld_loss": epoch_fake_kld_loss,
                },
                step=epoch,
            )
    print("Done")
# script entry point
if __name__ == "__main__":
    main()
| Python |
3D | llien30/point_cloud_anomaly_detection | test.py | .py | 10,046 | 321 | import argparse
import os
import random
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import yaml
from addict import Dict
from sklearn.metrics import auc, roc_curve
from torch.utils.data import DataLoader
from libs.checkpoint import resume
from libs.dataset import ShapeNeth5pyDataset
from libs.emd.emd_module import emdModule
from libs.foldingnet import FoldingNet, SkipFoldingNet, SkipValiationalFoldingNet
from libs.loss import ChamferLoss
def get_parameters():
    """Build the evaluation CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description="take config file path")
    parser.add_argument("config", type=str, help="path of a config file for testing")
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        help="path of the file where the weight is saved",
    )
    # anomaly-score components, each toggled by a boolean flag
    score_flags = [
        ("-c", "--chamfer", "Whether to add a chamfer score or not"),
        ("-e", "--emd", "Whether to add a emd score or not"),
        ("-k", "--kldiv", "Whether to add a kldiv score or not"),
        ("-f", "--feature_diff", "Whether to add a feature diff score or not"),
    ]
    for short_flag, long_flag, help_text in score_flags:
        parser.add_argument(short_flag, long_flag, action="store_true", help=help_text)
    parser.add_argument(
        "--histgram",
        action="store_true",
        help="Visualize histgram or not",
    )
    parser.add_argument(
        "--save_points",
        action="store_true",
        help="Save points or not",
    )
    return parser.parse_args()
def rescale(input):
    """Min-max normalise a 1-D sequence of scores to [0, 1].

    A constant sequence produces NaNs (0/0); the errstate guard keeps
    numpy from warning about that case.
    """
    values = np.array(input, dtype=float)
    lo = np.array(min(values))
    hi = np.array(max(values))
    with np.errstate(invalid="ignore"):
        scaled = (values - lo) / (hi - lo)
    return scaled
def vis_histgram(label: List, result: List, save_name: str) -> None:
    """Save overlaid anomaly-score histograms for normal vs abnormal samples."""
    # split scores by ground-truth label; x1000 for readable axis values
    normal_result = [r * 1000 for lbl, r in zip(label, result) if lbl == 0]
    abnormal_result = [r * 1000 for lbl, r in zip(label, result) if lbl != 0]
    bin_max = max(max(normal_result), max(abnormal_result))
    bins = np.linspace(0, bin_max, 100)
    for scores, tag in ((normal_result, "normal"), (abnormal_result, "abnormal")):
        plt.hist(scores, bins, alpha=0.5, label=tag)
    plt.xlabel("Anomaly Score")
    plt.ylabel("Number of samples")
    plt.legend(loc="upper right")
    plt.savefig(save_name)
    plt.close()
def main():
    """Score the test split with the trained model and report ROC-AUC.

    The anomaly score per sample is the sum of the enabled, rescaled
    components (chamfer / emd / kldiv / feature_diff).
    """
    # fix the random seeds
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    def set_seed(seed):
        # seed every RNG the pipeline touches
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        os.environ["PYTHONHASHSEED"] = str(seed)

    set_seed(0)
    args = get_parameters()
    # configuration
    with open(args.config, "r") as f:
        config_dict = yaml.safe_load(f)
    CONFIG = Dict(config_dict)
    print(config_dict)
    torch.autograd.set_detect_anomaly(True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test_dataset = ShapeNeth5pyDataset(
        root_path=CONFIG.root_path,
        split="test",
        normal_class=CONFIG.normal_class,
        abnormal_class=CONFIG.abnormal_class,
        n_point=CONFIG.n_points,
        random_rotate=False,
        random_jitter=False,
        random_translate=False,
    )
    test_dataloader = DataLoader(
        test_dataset, batch_size=CONFIG.test_batch_size, shuffle=False
    )
    # NOTE(review): no else branch — an unknown CONFIG.model leaves `model`
    # unbound and fails on model.to(device)
    if CONFIG.model == "FoldingNet":
        model = FoldingNet(CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape)
    elif CONFIG.model == "SkipFoldingNet":
        model = SkipFoldingNet(CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape)
    elif CONFIG.model == "SkipVariationalFoldingNet":
        model = SkipValiationalFoldingNet(
            CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape
        )
    model.to(device)
    # optimizer is only needed so `resume` can restore its state
    lr = 0.0001 * 16 / CONFIG.batch_size
    beta1, beta2 = 0.9, 0.999
    optimizer = torch.optim.Adam(
        model.parameters(), lr, [beta1, beta2], weight_decay=1e-6
    )
    epoch, model, optimizer = resume(args.checkpoint_path, model, optimizer)
    print(f"---------- Start testing for epoch{epoch} ----------")
    model.eval()
    pred = []
    labels = [""] * len(test_dataloader.dataset)
    names = [""] * len(test_dataloader.dataset)
    n = 0
    chamferloss = ChamferLoss()
    emd_loss = emdModule()
    # one score list per component; disabled components are filled with 0
    chamfer_scores = []
    emd_scores = []
    kldiv_scores = []
    feature_diff_scores = []
    for samples in test_dataloader:
        data = samples["data"].float()
        label = samples["label"]
        name = samples["name"]
        mini_batch_size = data.size()[0]
        data = data.to(device)
        # NOTE(review): `output` etc. are only assigned on this branch;
        # other model types would fail below
        if CONFIG.model == "SkipVariationalFoldingNet":
            with torch.no_grad():
                output, folding1, mu, log_var = model(data)
                if args.kldiv or args.feature_diff:
                    # re-encode the reconstruction for the fake posterior
                    _, _, fake_mu, fake_log_var = model(output)
        if args.chamfer:
            for d, o in zip(data, output):
                d = d.reshape(1, 2048, -1)
                o = o.reshape(1, 2048, -1)
                cl = chamferloss(d, o)
                chamfer_scores.append(cl)
        else:
            for _ in range(mini_batch_size):
                chamfer_scores.append(0)
        if args.emd:
            for d, o in zip(data, output):
                d = d.reshape(1, 2048, -1)
                o = o.reshape(1, 2048, -1)
                el, _ = emd_loss(d, o, 0.005, 50)
                el = torch.sqrt(el).mean(1)
                emd_scores.append(el)
        else:
            for _ in range(mini_batch_size):
                emd_scores.append(0)
        if args.kldiv:
            for m, l in zip(mu, log_var):
                # KL(q(z|x) || N(0, I)) per sample
                kldiv = torch.mean(
                    -0.5 * torch.sum(1 + l - m ** 2 - l.exp(), dim=1), dim=0
                )
                # kldiv = torch.mean(
                #     0.5
                #     * torch.sum(
                #         m ** 2 + l ** 2 - torch.log(l ** 2 + 1e-12) - 1, dim=1
                #     ),
                #     dim=0,
                # )
                kldiv_scores.append(kldiv)
            # for m, l, fm, fl in zip(mu, log_var, fake_mu, fake_log_var):
            #     P = torch.distributions.Normal(m, l)
            #     Q = torch.distributions.Normal(fm, fl)
            #     # kld_loss = torch.distributions.kl_divergence(G, P).mean()
            #     kldiv = torch.distributions.kl_divergence(P, Q).mean()
            #     # kldiv = torch.mean(
            #     #     -0.5 * torch.sum(1 + l - m ** 2 - l.exp(), dim=1), dim=0
            #     # )
            #     kldiv_scores.append(kldiv)
        else:
            for _ in range(mini_batch_size):
                kldiv_scores.append(0)
        if args.feature_diff:
            for m, l, fm, fl in zip(mu, log_var, fake_mu, fake_log_var):
                # reparameterized latent samples for real and reconstructed input
                std = torch.exp(0.5 * l)
                eps = torch.randn_like(std)
                feat = eps * std + m
                fake_std = torch.exp(0.5 * fl)
                fake_eps = torch.randn_like(fake_std)
                fake_feat = fake_eps * fake_std + fm
                diff_feat = feat - fake_feat
                diff_feat = diff_feat.reshape(-1)
                # mean squared difference between the two latent samples
                feature_diff_score = np.mean(
                    np.power(diff_feat.to("cpu").numpy(), 2.0)
                )
                feature_diff_scores.append(feature_diff_score)
        else:
            for _ in range(mini_batch_size):
                feature_diff_scores.append(0)
        if args.save_points:
            for i in range(mini_batch_size):
                o = output[i]
                d = data[i]
                d = d.reshape(1, 2048, -1)
                o = o.reshape(1, 2048, -1)
                d = d.to("cpu").numpy()
                o = o.to("cpu").numpy()
                if not os.path.exists("./original"):
                    os.makedirs("./original")
                if not os.path.exists("./reconstructed"):
                    os.makedirs("./reconstructed")
                np.save(f"./original/{n+i}.npy", d)
                np.save(f"./reconstructed/{n+i}.npy", o)
        labels[n : n + mini_batch_size] = label.reshape(mini_batch_size)
        names[n : n + mini_batch_size] = name
        n += mini_batch_size
    # rescale each enabled component to [0, 1] before summing
    if args.chamfer:
        chamfer_scores = rescale(chamfer_scores)
    if args.emd:
        emd_scores = rescale(emd_scores)
    if args.kldiv:
        kldiv_scores = rescale(kldiv_scores)
    if args.feature_diff:
        feature_diff_scores = rescale(feature_diff_scores)
    for chamfer_score, emd_score, kldiv_score, feature_diff_score in zip(
        chamfer_scores, emd_scores, kldiv_scores, feature_diff_scores
    ):
        score = chamfer_score + emd_score + kldiv_score + feature_diff_score
        pred.append(score)
    pred = np.array(pred, dtype=float)
    _min = np.array(min(pred))
    _max = np.array(max(pred))
    re_scaled = (pred - _min) / (_max - _min)
    # re_scaled = rescale(pred)
    re_scaled = np.array(re_scaled, dtype=float)
    fpr, tpr, _ = roc_curve(labels, re_scaled)
    roc_auc = auc(fpr, tpr)
    if args.histgram:
        vis_histgram(labels, pred, "lamp_histgram.png")
    if args.save_points:
        df = pd.DataFrame(list(zip(names, labels, pred)))
        df.to_csv("result.csv")
    print(f"ROC:{roc_auc}")
    print("Done")
# script entry point
if __name__ == "__main__":
    main()
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/loss.py | .py | 1,506 | 45 | # from collections import Counter, defaultdict
import torch
import torch.nn as nn
# from ortools.linear_solver import pywraplp
class ChamferLoss(nn.Module):
    """Symmetric squared Chamfer distance between two batched point clouds.

    forward(preds, gts) returns the sum (over batch and points, not the
    mean) of nearest-neighbour squared distances in both directions.
    """

    def __init__(self):
        super(ChamferLoss, self).__init__()
        # kept for backward compatibility; the math below is device-agnostic
        self.use_cuda = torch.cuda.is_available()

    def batch_pairwise_dist(self, x, y):
        """All-pairs squared distances: P[b, i, j] = ||x[b,i] - y[b,j]||^2."""
        # ||x||^2 + ||y||^2 - 2 x·y via batched Gram matrices
        xx = torch.bmm(x, x.transpose(2, 1))
        yy = torch.bmm(y, y.transpose(2, 1))
        zz = torch.bmm(x, y.transpose(2, 1))
        # squared norms are the Gram-matrix diagonals; torch.diagonal needs
        # no index tensors, so no explicit device handling is required
        rx = xx.diagonal(dim1=1, dim2=2).unsqueeze(1).expand_as(zz.transpose(2, 1))
        ry = yy.diagonal(dim1=1, dim2=2).unsqueeze(1).expand_as(zz)
        return rx.transpose(2, 1) + ry - 2 * zz

    def forward(self, preds, gts):
        dists = self.batch_pairwise_dist(gts, preds)
        # gt -> nearest prediction, plus prediction -> nearest gt
        loss_gt_to_pred = torch.sum(torch.min(dists, 1)[0])
        loss_pred_to_gt = torch.sum(torch.min(dists, 2)[0])
        return loss_gt_to_pred + loss_pred_to_gt
def mse_loss(input: torch.Tensor, output: torch.Tensor, size_average: bool = True) -> torch.Tensor:
    """Elementwise squared error; mean-reduced when size_average is True.

    FIX: annotations used `torch.tensor` (a factory function, not a type)
    and claimed a `float` return; the function actually returns a Tensor
    in both branches (0-dim when reduced).
    """
    if size_average:
        return torch.mean(torch.pow((input - output), 2))
    else:
        return torch.pow((input - output), 2)
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/dataset.py | .py | 12,035 | 374 | import copy
import glob
import json
import os
import random
# from .visualize import vis_points_3d
from typing import Tuple
import h5py
import numpy as np
import pandas as pd
import torch
from torch.utils import data
from .load_obj import loadOBJ
from .sampling import fartherst_point_sampling
class ShapeNetDataset(data.Dataset):
    """Point clouds loaded from .obj files listed in a CSV (columns: path, label)."""

    def __init__(self, csv_file, sampling="fps", n_point=2000):
        """
        :param csv_file: CSV with a 'path' and a 'label' column per sample
        :param sampling: point-sampling strategy, one of 'fps'/'random'/'order'
        :param n_point: number of points returned per sample
        """
        super().__init__()
        assert sampling.lower() in [
            "fps",
            "random",
            "order",
        ], "The sampling method must be 'fps','random', or 'order'!"
        self.df = pd.read_csv(csv_file)
        self.n_point = n_point
        self.sampling = sampling

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        path = self.df.iloc[idx]["path"]
        point = loadOBJ(path)
        point = np.array(point)
        # vis_point = torch.tensor(point)
        # vis_points_3d(vis_point, f"./{idx}_original.png")
        point_idx = [i for i in range(point.shape[0])]
        point_idx = np.array(point_idx)
        # points sampling
        if self.sampling == "fps":
            # farthest-point sampling for uniform coverage
            point_idx = fartherst_point_sampling(point, self.n_point)
            point = point[point_idx]
        elif self.sampling == "random":
            # sample with replacement only when the cloud is too small
            if point.shape[0] >= self.n_point:
                point_choice = np.random.choice(point_idx, self.n_point, replace=False)
            else:
                point_choice = np.random.choice(point_idx, self.n_point, replace=True)
            point = point[point_choice, :]
        elif self.sampling == "order":
            # simply truncate in file order
            point = point[: self.n_point]
        # point = torch.tensor(point)
        # vis_points_3d(point, f"./{idx}_sampling.png")
        label = self.df.iloc[idx]["label"]
        sample = {
            "data": point,
            "label": label,
            # NOTE(review): name is a fixed path slice [31:39]; depends on the
            # dataset directory layout — confirm against the CSV paths
            "name": path[31:39],
            "path": path,
        }
        return sample
class ShapeNeth5pyDataset(data.Dataset):
    """Anomaly-detection split over per-class .h5 files.

    Label 0 = normal classes, 1 = abnormal classes. Train uses only normal
    data; test/validation balance normal and abnormal sample counts.
    """

    def __init__(
        self,
        root_path: str,
        split: str,
        normal_class: list,
        abnormal_class: list,
        n_point: int,
        random_rotate: bool = False,
        random_jitter: bool = False,
        random_translate: bool = False,
    ) -> None:
        split = split.lower()
        assert split in [
            "train",
            "test",
            "validation",
        ], "split must be train, test, or validation"
        self.n_point = n_point
        self.random_rotate = random_rotate
        self.random_jitter = random_jitter
        self.random_translate = random_translate
        # one .h5 file per normal class under <root>/<split>/
        normal_data_paths = []
        for n_class in normal_class:
            normal_data_paths.append(os.path.join(root_path, split, f"{n_class}.h5"))
        normal_data, normal_name = self.load_h5py(normal_data_paths)
        normal_data = np.concatenate(normal_data, axis=0)
        normal_name = np.concatenate(normal_name, axis=0)
        # shuffle the normal data
        if split == "test":
            tmp = list(zip(normal_data, normal_name))
            random.shuffle(tmp)
            normal_data, normal_name = zip(*tmp)
        self.normal_data = normal_data
        self.normal_name = normal_name
        self.normal_label = [0] * len(self.normal_data)
        if split == "train":
            # training is one-class: no abnormal samples at all
            self.abnormal_data = np.array([])
            self.abnormal_name = np.array([])
            self.abnormal_label = []
        else:
            abnormal_data_paths = []
            for a_class in abnormal_class:
                abnormal_data_paths.append(
                    os.path.join(root_path, split, f"{a_class}.h5")
                )
            abnormal_data, abnormal_name = self.load_h5py(abnormal_data_paths)
            # shuffle the abnormal data
            abnormal_data = np.concatenate(abnormal_data, axis=0)
            abnormal_name = np.concatenate(abnormal_name, axis=0)
            tmp = list(zip(abnormal_data, abnormal_name))
            random.shuffle(tmp)
            abnormal_data, abnormal_name = zip(*tmp)
            self.abnormal_data = abnormal_data
            self.abnormal_name = abnormal_name
            self.abnormal_label = [1] * len(self.abnormal_data)
        if split == "train":
            self.data = self.normal_data
            self.name = self.normal_name
            self.label = self.normal_label
        else:
            # keep the two classes balanced by truncating to the smaller one
            len_data = min(len(self.normal_data), len(self.abnormal_data))
            self.data = np.concatenate(
                [self.normal_data[:len_data], self.abnormal_data[:len_data]], axis=0
            )
            self.name = np.concatenate(
                [self.normal_name[:len_data], self.abnormal_name[:len_data]], axis=0
            )
            self.label = self.normal_label[:len_data] + self.abnormal_label[:len_data]

    def load_h5py(self, path: list) -> Tuple[list, list]:
        """Read 'data' and 'label' arrays from every file in `path`."""
        all_data = []
        all_label = []
        for h5_name in path:
            f = h5py.File(h5_name, "r+")
            data = f["data"][:].astype("float32")
            label = f["label"][:]
            f.close()
            all_data.append(data)
            all_label.append(label)
            print(f"{label[0]} : {len(data)}")
        return all_data, all_label

    def __getitem__(self, idx: int) -> dict:
        point_set = self.data[idx][: self.n_point]
        name = self.name[idx]
        label = self.label[idx]
        # optional augmentations, then deterministic normalization
        if self.random_rotate:
            point_set = rotate_pointcloud(point_set)
        if self.random_jitter:
            point_set = jitter_pointcloud(point_set)
        if self.random_translate:
            point_set = translate_pointcloud(point_set)
        point_set = change2positive(point_set)
        point_set = uniform_size(point_set)
        # convert numpy array to pytorch Tensor
        point_set = torch.from_numpy(point_set)
        return {"data": point_set, "label": label, "name": name}

    def __len__(self):
        return self.data.shape[0]
def change2positive(pointcloud):
    """Translate the cloud in place so every axis starts at 0; returns it."""
    n_points = pointcloud.shape[0]
    for axis in range(3):
        axis_min = min(pointcloud[:, axis])
        pointcloud[:, axis] -= np.array([axis_min] * n_points)
    return pointcloud
def uniform_size(pointcloud):
    """Scale each axis in place to unit extent (max - min == 1); returns it."""
    for axis in range(3):
        extent = max(pointcloud[:, axis]) - min(pointcloud[:, axis])
        pointcloud[:, axis] /= extent
    return pointcloud
def translate_pointcloud(pointcloud):
    """Augment with a random per-axis scale (2/3..3/2) and shift (-0.2..0.2)."""
    scale = np.random.uniform(low=2.0 / 3.0, high=3.0 / 2.0, size=[3])
    shift = np.random.uniform(low=-0.2, high=0.2, size=[3])
    # broadcasts over all points; cast back to float32 for the model
    return (pointcloud * scale + shift).astype("float32")
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
    """Shift a copy of the cloud to the origin and add per-axis Gaussian jitter.

    The noise std on each axis is 1% of that axis' extent. `sigma` and
    `clip` are kept for signature compatibility but are not used.
    """
    pointcloud = copy.deepcopy(pointcloud)  # never mutate the caller's array
    N, C = pointcloud.shape
    # pointcloud += np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)
    # pointcloud += np.random.normal(0, 0.02, size=pointcloud.shape)
    extents = []
    for axis in range(3):
        lo = min(pointcloud[:, axis])
        hi = max(pointcloud[:, axis])
        pointcloud[:, axis] -= np.array([lo] * pointcloud.shape[0])
        extents.append(hi - lo)
    # draw x, then y, then z jitter (order matters for RNG reproducibility)
    for axis in range(3):
        noise = np.random.normal(0, extents[axis] * 0.01, size=(pointcloud.shape[0]))
        pointcloud[:, axis] += noise
    return pointcloud
def rotate_pointcloud(pointcloud):
    """Rotate the cloud in place around the y axis by a random multiple of 15°."""
    theta = np.pi * 2 * np.random.choice(24) / 24
    c, s = np.cos(theta), np.sin(theta)
    rotation = np.array([[c, -s], [s, c]])
    # rotate in the x-z plane only; y stays untouched
    pointcloud[:, [0, 2]] = pointcloud[:, [0, 2]].dot(rotation)
    return pointcloud
class Dataseth5py(data.Dataset):
    """Generic reader for the 2048-point hdf5 benchmark datasets
    (shapenetcorev2 / shapenetpart / modelnet10 / modelnet40)."""

    def __init__(
        self,
        root,
        dataset_name="shapenetcorev2",
        num_points=2048,
        split="train",
        load_name=False,
        random_rotate=False,
        random_jitter=False,
        random_translate=False,
    ):
        assert dataset_name.lower() in [
            "shapenetcorev2",
            "shapenetpart",
            "modelnet10",
            "modelnet40",
        ]
        # the .h5 files store exactly 2048 points per cloud
        assert num_points <= 2048
        # only the shapenet variants ship a validation split
        if dataset_name in ["shapenetpart", "shapenetcorev2"]:
            assert split.lower() in ["train", "test", "val", "trainval", "all"]
        else:
            assert split.lower() in ["train", "test", "all"]
        self.root = os.path.join(root, dataset_name + "*hdf5_2048")
        self.dataset_name = dataset_name
        self.num_points = num_points
        self.split = split
        self.load_name = load_name
        self.random_rotate = random_rotate
        self.random_jitter = random_jitter
        self.random_translate = random_translate
        self.path_h5py_all = []
        self.path_json_all = []
        # collect the file lists for every sub-split the request covers
        if self.split in ["train", "trainval", "all"]:
            self.get_path("train")
        if self.dataset_name in ["shapenetpart", "shapenetcorev2"]:
            if self.split in ["val", "trainval", "all"]:
                self.get_path("val")
        if self.split in ["test", "all"]:
            self.get_path("test")
        self.path_h5py_all.sort()
        data, label = self.load_h5py(self.path_h5py_all)
        if self.load_name:
            self.path_json_all.sort()
            self.name = self.load_json(self.path_json_all)    # load label name
        self.data = np.concatenate(data, axis=0)
        self.label = np.concatenate(label, axis=0)

    def get_path(self, type):
        """Append the .h5 (and optional .json) file paths for one sub-split."""
        path_h5py = os.path.join(self.root, "*%s*.h5" % type)
        self.path_h5py_all += glob.glob(path_h5py)
        if self.load_name:
            path_json = os.path.join(self.root, "%s*_id2name.json" % type)
            self.path_json_all += glob.glob(path_json)
        return

    def load_h5py(self, path):
        """Read 'data' and 'label' arrays from every file in `path`."""
        all_data = []
        all_label = []
        for h5_name in path:
            f = h5py.File(h5_name, "r+")
            data = f["data"][:].astype("float32")
            label = f["label"][:].astype("int64")
            f.close()
            all_data.append(data)
            all_label.append(label)
        return all_data, all_label

    def load_json(self, path):
        """Concatenate the id->name lists from every json file in `path`."""
        all_data = []
        for json_name in path:
            j = open(json_name, "r+")
            data = json.load(j)
            all_data += data
        return all_data

    def __getitem__(self, item):
        point_set = self.data[item][: self.num_points]
        label = self.label[item]
        if self.load_name:
            name = self.name[item]  # get label name
        # optional on-the-fly augmentations
        if self.random_rotate:
            point_set = rotate_pointcloud(point_set)
        if self.random_jitter:
            point_set = jitter_pointcloud(point_set)
        if self.random_translate:
            point_set = translate_pointcloud(point_set)
        # convert numpy array to pytorch Tensor
        point_set = torch.from_numpy(point_set)
        label = torch.from_numpy(np.array([label]).astype(np.int64))
        label = label.squeeze(0)
        if self.load_name:
            return {"data": point_set, "label": label, "name": name}
        else:
            return {"data": point_set, "label": label}

    def __len__(self):
        return self.data.shape[0]
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/load_obj.py | .py | 309 | 16 | def loadOBJ(filepath: str) -> list:
file = open(filepath, "r")
vertices = []
for line in file:
vals = line.split()
if len(vals) == 0:
continue
if vals[0] == "v":
v = list(map(float, vals[1:4]))
vertices.append(v)
return vertices
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/vis_histogram.py | .py | 839 | 29 | import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def vis_histgram(csv_file: str, save_dir: str) -> None:
    """Plot overlaid anomaly-score histograms of normal vs. abnormal samples.

    Reads the evaluation result CSV (column "1": label string, column "2":
    anomaly score), classifies each row by the 8th character of its label
    ("0" -> normal), scales scores by 1000 and writes the overlaid
    histograms to <save_dir>/histgram.png.
    """
    results = pd.read_csv(csv_file, index_col=0)
    scores = {"normal": [], "abnormal": []}
    for lbl, score in zip(results["1"], results["2"]):
        group = "normal" if lbl[7] == "0" else "abnormal"
        scores[group].append(score * 1000)
    upper = max(max(scores["normal"]), max(scores["abnormal"]))
    bins = np.linspace(0, upper, 100)
    plt.hist(scores["normal"], bins, alpha=0.5, label="normal")
    plt.hist(scores["abnormal"], bins, alpha=0.5, label="abnormal")
    plt.legend(loc="upper left")
    plt.xlabel("Anomaly Score")
    plt.ylabel("Number of samples")
    plt.savefig(os.path.join(save_dir, "histgram.png"))
    plt.close()
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/visualize.py | .py | 818 | 39 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def vis_points_3d(points, save_name):
    """Scatter-plot a point-cloud tensor in 3-D and save the figure.

    Args:
        points: torch tensor of shape (N, 3); moved to CPU and detached here.
        save_name: output image path.

    Axes are permuted (z, x, y) so the cloud's "up" axis matches the plot.
    """
    points = points.to("cpu").detach().numpy()
    fig = plt.figure()
    # Axes3D(fig) stopped auto-registering the axes with the figure in
    # Matplotlib 3.4+, which silently produces an empty saved image;
    # add_subplot(projection="3d") is the supported way to attach 3-D axes.
    ax = fig.add_subplot(111, projection="3d")
    ax.plot(
        points[:, 2],
        points[:, 0],
        points[:, 1],
        c="red",
        marker="o",
        linestyle="None",
    )
    plt.savefig(save_name)
    plt.close(fig)
def vis_points_2d(points, save_name):
    """Scatter-plot the (x, y) columns of a point tensor and save the figure.

    Args:
        points: torch tensor of shape (N, >=2); moved to CPU and detached here.
        save_name: output image path.
    """
    xy = points.to("cpu").detach().numpy()
    fig = plt.figure()
    axes = fig.add_subplot()
    axes.plot(
        xy[:, 0],
        xy[:, 1],
        c="red",
        marker="o",
        linestyle="None",
    )
    plt.savefig(save_name)
    plt.close(fig)
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/meter.py | .py | 597 | 24 | class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/__init__.py | .py | 96 | 5 | from .dataset import *
from .foldingnet import *
from .helper import *
from .visualize import *
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/checkpoint.py | .py | 1,073 | 44 | import os
from typing import Tuple
import torch
import torch.nn as nn
import torch.optim as optim
def save_checkpoint(
    result_path: str,
    epoch: int,
    model: nn.Module,
    optimizer: optim.Optimizer,
) -> None:
    """Serialize the epoch counter, model weights and optimizer state.

    Writes ``<result_path>/<epoch>.pth`` in the layout expected by
    ``resume`` ("epoch" / "model_state_dict" / "optimizer").
    """
    checkpoint = {
        "epoch": epoch,
        "model_state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
    }
    destination = os.path.join(result_path, f"{epoch}.pth")
    torch.save(checkpoint, destination)
def resume(
    resume_path: str,
    model: nn.Module,
    optimizer: optim.Optimizer,
) -> Tuple[int, nn.Module, optim.Optimizer]:
    """Restore training state previously written by ``save_checkpoint``.

    Args:
        resume_path: path to a ``<epoch>.pth`` checkpoint file.
        model: module whose weights are loaded in place.
        optimizer: optimizer whose state is loaded in place.

    Returns:
        (epoch stored in the checkpoint, model, optimizer).

    Raises:
        FileNotFoundError: if *resume_path* does not exist.
    """
    # An assert would disappear under ``python -O``; raise explicitly so a
    # missing checkpoint always fails loudly.
    if not os.path.exists(resume_path):
        raise FileNotFoundError(
            "there is no checkpoint at {}".format(resume_path)
        )
    print("loading checkpoint {}".format(resume_path))
    # map_location keeps CPU-only machines able to load GPU-trained weights.
    checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
    begin_epoch = checkpoint["epoch"]
    model.load_state_dict(checkpoint["model_state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    print("Successfly loaded the weight of {} epoch".format(begin_epoch))
    return begin_epoch, model, optimizer
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/sampling.py | .py | 1,372 | 44 | import numpy as np
def l2_norm(x, y):
    """Squared Euclidean distance between broadcastable arrays *x* and *y*.

    Args:
        x (numpy.ndarray or cupy): (batch_size, num_point, coord_dim)
        y (numpy.ndarray): (batch_size, num_point, coord_dim)
    Returns (numpy.ndarray): (batch_size, num_point)

    Note: despite the name, no square root is taken.
    """
    diff = x - y
    return (diff * diff).sum(axis=1)


def fartherst_point_sampling(
    points: np.ndarray,
    num_sample_point: int,
    initial_idx=None,
    metrics=l2_norm,
    indices_dtype=np.int32,
    distances_dtype=np.float32,
) -> np.ndarray:
    """Greedy farthest-point sampling.

    Starting from *initial_idx* (random when None), repeatedly pick the
    point farthest (under *metrics*) from all points chosen so far.
    Returns the chosen indices, shape (num_sample_point,).
    """
    assert points.ndim == 2, "input points shoud be 2-dim array (n_points, coord_dim)"
    n_points, _ = points.shape
    chosen = np.zeros((num_sample_point,), dtype=indices_dtype)
    dist_table = np.zeros((num_sample_point, n_points), dtype=distances_dtype)

    chosen[0] = (
        np.random.randint(len(points)) if initial_idx is None else initial_idx
    )
    # running minimum distance of every point to the chosen set
    nearest_dist = metrics(points[chosen[0]][None, :], points)
    dist_table[0, :] = nearest_dist

    for step in range(1, num_sample_point):
        chosen[step] = np.argmax(nearest_dist, axis=0)
        step_dist = metrics(points[chosen[step]][None, :], points)
        dist_table[step, :] = step_dist
        nearest_dist = np.minimum(nearest_dist, step_dist)
    return chosen
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/helper.py | .py | 15,778 | 477 | import os
import time
from typing import Any, List, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import wandb
from torch import optim
from torch.distributions import Categorical
from torch.utils.data import DataLoader
from .emd.emd_module import emdModule
from .loss import ChamferLoss, mse_loss
from .meter import AverageMeter
from .vis_histgram import vis_histgram
from .visualize import vis_points_3d
matplotlib.use("Agg")
def train_foldingnet(
    loader: DataLoader,
    model: nn.Module,
    reconstruction_loss: str,
    optimizer: optim.Optimizer,
    weight: List,
    epoch: int,
    device: str,
    save_dir: str,
) -> Tuple[float, float, float, float, float, float]:
    """Run one training epoch of the folding-based autoencoder.

    Args:
        loader: yields sample dicts whose "data" entry is a point-cloud batch.
        model: returns (reconstruction, first folding, codeword feature).
        reconstruction_loss: "CD" (Chamfer) or "EMD" (earth mover's distance).
        optimizer: optimizer stepping the model parameters.
        weight: loss weights, in order [w_in, w_out, w_feat, w_inf].
        epoch: current epoch; several loss terms only activate after
            epoch 100 (curriculum schedule, see the weighting logic below).
        device: device string batches are moved to.
        save_dir: directory where one sample cloud per epoch is dumped.

    Returns:
        (avg loss, avg inner loss, avg out loss, avg feat loss,
        avg inf loss, wall-clock epoch time in seconds).
    """
    loss_meter = AverageMeter("loss", ":.4e")
    inner_loss_meter = AverageMeter("inner_loss", ":.4e")
    out_loss_meter = AverageMeter("out_loss", ":.4e")
    feat_loss_meter = AverageMeter("feat_loss", ":.4e")
    inf_loss_meter = AverageMeter("inf_loss", ":4e")
    if reconstruction_loss == "CD":
        inner_criterion = ChamferLoss()
        out_criterion = ChamferLoss()
    elif reconstruction_loss == "EMD":
        # inner_criterion = emdModule()
        # In "EMD" mode Chamfer is still used for the inner folding and,
        # before epoch 100, also for the outer reconstruction (see below).
        inner_criterion = ChamferLoss()
        out_criterion1 = ChamferLoss()
        out_criterion2 = emdModule()
    # switch to train mode
    model.train()
    softmax = nn.Softmax(dim=2)
    t_epoch_start = time.time()
    for samples in loader:
        points = samples["data"].float()
        points = points.to(device)
        # Forword Pass
        output, folding1, feature = model(points)
        # re-encode the reconstruction to get a "fake" codeword for the
        # feature-consistency loss
        _, _, fake_feature = model(output)
        # points = points.cpu().detach().numpy()
        # folding1 = folding1.cpu().detach().numpy()
        # output = output.cpu().detach().numpy()
        if reconstruction_loss == "CD":
            inner_loss = inner_criterion(points, folding1)
            out_loss = out_criterion(points, output)
        elif reconstruction_loss == "EMD":
            inner_loss = inner_criterion(points, folding1)
            if epoch < 100:
                out_loss = out_criterion1(points, output)
            else:
                # emdModule(eps=0.005, iters=50) returns squared matches
                out_loss, _ = out_criterion2(points, output, 0.005, 50)
                out_loss = torch.sqrt(out_loss).mean(1)
                out_loss = out_loss.mean()
        # print(inner_loss)
        # inner_loss = wasserstein_distance(points, folding1)
        # out_loss = wasserstein_distance(points, output)
        # inner_loss = torch.from_numpy(inner_loss.to(device))
        # out_loss = torch.from_numpy(out_loss.to(device))
        # entropy of the softmaxed codeword ("information" regularizer)
        softmax_feat = softmax(feature)
        inf_loss = torch.sum(Categorical(probs=softmax_feat).entropy())
        feat_loss = mse_loss(feature, fake_feature)
        w_in = weight[0]
        w_out = weight[1]
        w_feat = weight[2]
        w_inf = weight[3]
        # NOTE(review): in the "CD" branch, when w_in == 0 and w_inf != 0 no
        # branch assigns `loss` (the elif chain falls through) — NameError on
        # the first batch. Confirm that weight combination is never used.
        if reconstruction_loss == "CD":
            if w_in == 0 and w_inf == 0:
                loss = out_loss
            elif w_inf == 0:
                loss = w_in * inner_loss + w_out * out_loss
            elif w_in != 0:
                loss = w_in * inner_loss + w_out * out_loss + w_inf * inf_loss
            if w_feat != 0 and epoch > 100:
                loss += w_feat * feat_loss
        else:
            # EMD schedule: outer reconstruction term only joins after epoch 100
            if w_inf == 0:
                loss = w_in * inner_loss
            else:
                loss = w_in * inner_loss + w_inf * inf_loss
            if w_feat != 0 and epoch > 100:
                loss += w_feat * feat_loss
            if epoch > 100:
                loss += w_out * out_loss
        inner_loss_meter.update(inner_loss.item())
        out_loss_meter.update(out_loss.item())
        feat_loss_meter.update(feat_loss.item())
        inf_loss_meter.update(inf_loss.item())
        loss_meter.update(loss.item())
        # Backword Pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    epoch_time = time.time() - t_epoch_start
    epoch_loss = loss_meter.avg
    epoch_inner_loss = inner_loss_meter.avg
    epoch_out_loss = out_loss_meter.avg
    epoch_feat_loss = feat_loss_meter.avg
    epoch_inf_loss = inf_loss_meter.avg
    # dump the first cloud of the *last* batch as png + npy for inspection
    if not os.path.exists(os.path.join(save_dir, "original")):
        os.makedirs(os.path.join(save_dir, "original"))
    if not os.path.exists(os.path.join(save_dir, "folding1")):
        os.makedirs(os.path.join(save_dir, "folding1"))
    if not os.path.exists(os.path.join(save_dir, "reconstructed")):
        os.makedirs(os.path.join(save_dir, "reconstructed"))
    vis_points_3d(
        points[0],
        save_dir + f"/original/{epoch}.png",
    )
    point = points[0].to("cpu").detach().numpy()
    np.save(save_dir + f"/original/{epoch}.npy", point)
    vis_points_3d(
        folding1[0],
        save_dir + f"/folding1/{epoch}.png",
    )
    folding = folding1[0].to("cpu").detach().numpy()
    np.save(save_dir + f"/folding1/{epoch}.npy", folding)
    vis_points_3d(
        output[0],
        save_dir + f"/reconstructed/{epoch}.png",
    )
    out_point = output[0].to("cpu").detach().numpy()
    np.save(save_dir + f"/reconstructed/{epoch}.npy", out_point)
    return (
        epoch_loss,
        epoch_inner_loss,
        epoch_out_loss,
        epoch_feat_loss,
        epoch_inf_loss,
        epoch_time,
    )
def evaluate_foldingnet(
    loader: DataLoader,
    model: nn.Module,
    epoch: int,
    device: str,
    save_dir: str,
) -> Tuple[float, float, float, float, float, float]:
    """Evaluate anomaly detection: score = mean squared codeword difference.

    Each sample is scored by re-encoding its reconstruction and taking the
    mean squared difference between the real and fake codewords. Scores are
    min-max rescaled, ROC-AUC is computed, then thresholded at 0.2 for the
    classification metrics.

    NOTE(review): `TSNE`, `roc_curve`, `auc`, `accuracy_score`,
    `precision_score`, `recall_score`, `f1_score` and
    `average_precision_score` are not imported anywhere in this module's
    import block — this function raises NameError as written; the sklearn
    imports appear to be missing.

    Returns:
        (roc_auc, accuracy, precision, recall, f1, average_precision).
    """
    # create the per-epoch output directories for normal / abnormal samples
    save_normal = os.path.join(save_dir, f"epoch{epoch}/normal")
    save_abnormal = os.path.join(save_dir, f"epoch{epoch}/abnormal")
    if not os.path.exists(save_normal):
        os.makedirs(save_normal)
    if not os.path.exists(save_abnormal):
        os.makedirs(save_abnormal)
    eval_criterion = ChamferLoss()
    # switch to evalutation mode
    model.eval()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    pred = []
    # pre-sized slots, filled slice-by-slice as batches arrive
    labels = [""] * len(loader.dataset)
    names = [""] * len(loader.dataset)
    points = [""] * len(loader.dataset)
    folding1s = [""] * len(loader.dataset)
    out_points = [""] * len(loader.dataset)
    n = 0
    feature_vec = []
    diff_vec = []
    chamferloss = []
    for samples in loader:
        data = samples["data"].float()
        label = samples["label"]
        name = samples["name"]
        mini_batch_size = data.size()[0]
        points[n : n + mini_batch_size] = data
        data = data.to(device)
        with torch.no_grad():
            output, folding1, real_feat = model(data)
            _, _, fake_feat = model(output)
        folding1s[n : n + mini_batch_size] = folding1
        out_points[n : n + mini_batch_size] = output
        n_input_points = data.shape[1]
        n_output_points = output.shape[1]
        # per-sample Chamfer distance (kept for reference; not the score used)
        for d, o in zip(data, output):
            d = d.reshape(1, n_input_points, -1)
            o = o.reshape(1, n_output_points, -1)
            cl = eval_criterion(d, o)
            cl = cl.tolist()
            chamferloss.append(cl)
        # anomaly signal: difference between real and re-encoded codewords
        test_feat = real_feat - fake_feat
        test_feat = test_feat.to("cpu")
        real_feat = real_feat.to("cpu").squeeze()
        for rf in real_feat:
            rf = rf.tolist()
            feature_vec.append(rf)
        for tf in test_feat:
            tf = tf.squeeze()
            tf = tf.tolist()
            diff_vec.append(tf)
        for i in range(test_feat.shape[0]):
            vec = test_feat[i]
            vec = vec.reshape(-1)
            score = np.mean(np.power(vec.numpy(), 2.0))
            pred.append(score)
        labels[n : n + mini_batch_size] = label.reshape(mini_batch_size)
        names[n : n + mini_batch_size] = name
        n += mini_batch_size
    # T-SNE visualization
    # feature vector
    feat_reduced = TSNE(n_components=2).fit_transform(feature_vec)
    plt.scatter(feat_reduced[:, 0], feat_reduced[:, 1], c=labels)
    plt.savefig(os.path.join(save_dir, f"epoch{epoch}/feat_tsne.png"))
    plt.close()
    # diff vector
    diff_feat_reduced = TSNE(n_components=2).fit_transform(diff_vec)
    plt.scatter(diff_feat_reduced[:, 0], diff_feat_reduced[:, 1], c=labels)
    plt.savefig(os.path.join(save_dir, f"epoch{epoch}/diff_tsne.png"))
    plt.close()
    if not os.path.exists(os.path.join(save_dir, "result_point/original")):
        os.makedirs(os.path.join(save_dir, "result_point/original"))
    if not os.path.exists(os.path.join(save_dir, "result_point/reconstructed")):
        os.makedirs(os.path.join(save_dir, "result_point/reconstructed"))
    # dump every cloud only at the final (200th) epoch
    if epoch == 200:
        cnt = 0
        for point in points:
            point = point.to("cpu").detach().numpy()
            np.save(os.path.join(save_dir, f"result_point/original/{cnt}.npy"), point)
            cnt += 1
        cnt = 0
        for out_point in out_points:
            out_point = out_point.to("cpu").detach().numpy()
            np.save(
                os.path.join(save_dir, f"result_point/reconstructed/{cnt}.npy"),
                out_point,
            )
            cnt += 1
    # save result
    df = pd.DataFrame(list(zip(names, labels, pred, points, folding1s, out_points)))
    df.to_csv(os.path.join(save_dir, f"epoch{epoch}/result.csv"))
    vis_histgram(
        os.path.join(save_dir, f"epoch{epoch}/result.csv"),
        os.path.join(save_dir, f"epoch{epoch}"),
    )
    # pred = np.array(chamferloss)
    # NOTE(review): `pred` is a Python list here, so `(pred - _min)` raises
    # TypeError — it presumably should be converted with np.array(pred) first.
    _min = min(pred)
    _max = max(pred)
    re_scaled = (pred - _min) / (_max - _min)
    re_scaled = np.array(re_scaled)
    fpr, tpr, _ = roc_curve(labels, re_scaled)
    roc_auc = auc(fpr, tpr)
    # fixed decision threshold on the min-max rescaled score
    thresh = 0.2
    re_scaled[re_scaled >= thresh] = 1
    re_scaled[re_scaled < thresh] = 0
    acc = accuracy_score(labels, re_scaled)
    prec = precision_score(labels, re_scaled)
    rec = recall_score(labels, re_scaled)
    f1 = f1_score(labels, re_scaled)
    avg_prec = average_precision_score(labels, re_scaled)
    return roc_auc, acc, prec, rec, f1, avg_prec
def train_variational_foldingnet(
    loader: DataLoader,
    model: nn.Module,
    reconstruction_loss: str,
    optimizer: optim.Optimizer,
    weight: List,
    epoch: int,
    device: str,
    save_dir: str,
) -> Tuple[float, float, float, float, float, float]:
    """Run one training epoch of the variational folding autoencoder.

    Args:
        loader: yields sample dicts whose "data" entry is a point-cloud batch.
        model: returns (reconstruction, first folding, mu, sigma).
        reconstruction_loss: "CD" (Chamfer) or "EMD" (earth mover's distance).
        optimizer: optimizer stepping the model parameters.
        weight: loss weights, in order [w_in, w_out, w_kld, w_fake_kld].
        epoch: current epoch (used only for the saved-sample filenames).
        device: device string batches are moved to.
        save_dir: directory where one sample cloud per epoch is dumped.

    Returns:
        (avg loss, avg inner loss, avg out loss, avg kld loss,
        avg fake kld loss, wall-clock epoch time in seconds).

    NOTE(review): the KL terms below treat `sigma` as a standard deviation
    (sigma ** 2), while the model's `sample_z` treats the same tensor as a
    log-variance — confirm which convention is intended.
    """
    loss_meter = AverageMeter("loss", ":.4e")
    inner_loss_meter = AverageMeter("inner_loss", ":.4e")
    out_loss_meter = AverageMeter("out_loss", ":.4e")
    kld_loss_meter = AverageMeter("kld_loss", ":4e")
    fake_kld_loss_meter = AverageMeter("fake_kld_loss", ":4e")
    if reconstruction_loss == "CD":
        inner_criterion = ChamferLoss()
        out_criterion = ChamferLoss()
    elif reconstruction_loss == "EMD":
        # inner_criterion = emdModule()
        inner_criterion = ChamferLoss()
        out_criterion = emdModule()
    # switch to train mode
    model.train()
    softmax = nn.Softmax(dim=2)  # defined but unused in this function
    t_epoch_start = time.time()
    for samples in loader:
        points = samples["data"].float()
        points = points.to(device)
        # Forword Pass
        output, folding1, mu, sigma = model(points)
        # re-encode the reconstruction for the "fake" KL term
        _, _, fake_mu, fake_sigma = model(output)
        if reconstruction_loss == "CD":
            inner_loss = inner_criterion(points, folding1)
            out_loss = out_criterion(points, output)
        elif reconstruction_loss == "EMD":
            inner_loss = inner_criterion(points, folding1)
            # emdModule(eps=0.005, iters=50) returns squared matches
            out_loss, _ = out_criterion(points, output, 0.005, 50)
            out_loss = torch.sqrt(out_loss).mean(1)
            out_loss = out_loss.mean()
        # softmax_feat = softmax(feature)
        # inf_loss = torch.sum(Categorical(probs=softmax_feat).entropy())
        """KL Divergence between N(mu, sigma^2) and N(0, 1)"""
        mu = torch.squeeze(mu)
        sigma = torch.squeeze(sigma)
        kld_loss = torch.mean(
            0.5
            * torch.sum(
                mu ** 2 + sigma ** 2 - torch.log(sigma ** 2 + 1e-12) - 1, dim=1
            ),
            dim=0,
        )
        # """KL Divergence between N(mu, sigma^2) and N(fake_mu, fake_sigma^2)"""
        # fake_mu = torch.squeeze(fake_mu)
        # fake_sigma = torch.squeeze(fake_sigma)
        # fake_kld_loss = torch.mean(
        #     0.5
        #     * torch.sum(
        #         torch.log(fake_sigma ** 2 + 1e-12)
        #         - torch.log(sigma ** 2 + 1e-12)
        #         + (sigma ** 2 + (mu - fake_mu) ** 2) / fake_sigma ** 2
        #         - 1,
        #         dim=1,
        #     ),
        #     dim=0,
        # )
        """KL Divergence between N(fake_mu, fake_sigma^2) and N(0, 1)"""
        fake_mu = torch.squeeze(fake_mu)
        fake_sigma = torch.squeeze(fake_sigma)
        fake_kld_loss = torch.mean(
            0.5
            * torch.sum(
                fake_mu ** 2 + fake_sigma ** 2 - torch.log(fake_sigma ** 2 + 1e-12) - 1,
                dim=1,
            ),
            dim=0,
        )
        # G = torch.distributions.Normal(0, 1)
        # P = torch.distributions.Normal(mu, log_var)
        # Q = torch.distributions.Normal(fake_mu, fake_log_var)
        # kld_loss = torch.distributions.kl_divergence(G, P).mean()
        # fake_kld_loss = torch.distributions.kl_divergence(G, Q).mean()
        w_in = weight[0]
        w_out = weight[1]
        w_kld = weight[2]
        w_fake_kld = weight[3]
        # zero-weight terms are dropped from the graph entirely rather than
        # multiplied by 0 (avoids needless backward work)
        if reconstruction_loss == "CD":
            if w_in == 0 and w_fake_kld == 0:
                loss = w_out * out_loss + w_kld * kld_loss
            elif w_fake_kld == 0:
                loss = w_in * inner_loss + w_out * out_loss + w_kld * kld_loss
            else:
                loss = (
                    w_in * inner_loss
                    + w_out * out_loss
                    + w_kld * kld_loss
                    + w_fake_kld * fake_kld_loss
                )
        else:
            if w_in == 0:
                loss = w_out * out_loss + w_kld * kld_loss
            else:
                loss = w_in * inner_loss + w_out * out_loss + w_kld * kld_loss
        inner_loss_meter.update(inner_loss.item())
        out_loss_meter.update(out_loss.item())
        kld_loss_meter.update(kld_loss.item())
        fake_kld_loss_meter.update(fake_kld_loss.item())
        loss_meter.update(loss.item())
        # Backword Pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    epoch_time = time.time() - t_epoch_start
    epoch_loss = loss_meter.avg
    epoch_inner_loss = inner_loss_meter.avg
    epoch_out_loss = out_loss_meter.avg
    epoch_kld_loss = kld_loss_meter.avg
    epoch_fake_kld_loss = fake_kld_loss_meter.avg
    # dump the first cloud of the *last* batch as png + npy for inspection
    if not os.path.exists(os.path.join(save_dir, "original")):
        os.makedirs(os.path.join(save_dir, "original"))
    if not os.path.exists(os.path.join(save_dir, "folding1")):
        os.makedirs(os.path.join(save_dir, "folding1"))
    if not os.path.exists(os.path.join(save_dir, "reconstructed")):
        os.makedirs(os.path.join(save_dir, "reconstructed"))
    vis_points_3d(
        points[0],
        save_dir + f"/original/{epoch}.png",
    )
    point = points[0].to("cpu").detach().numpy()
    np.save(save_dir + f"/original/{epoch}.npy", point)
    vis_points_3d(
        folding1[0],
        save_dir + f"/folding1/{epoch}.png",
    )
    folding = folding1[0].to("cpu").detach().numpy()
    np.save(save_dir + f"/folding1/{epoch}.npy", folding)
    vis_points_3d(
        output[0],
        save_dir + f"/reconstructed/{epoch}.png",
    )
    out_point = output[0].to("cpu").detach().numpy()
    np.save(save_dir + f"/reconstructed/{epoch}.npy", out_point)
    return (
        epoch_loss,
        epoch_inner_loss,
        epoch_out_loss,
        epoch_kld_loss,
        epoch_fake_kld_loss,
        epoch_time,
    )
| Python |
3D | llien30/point_cloud_anomaly_detection | libs/foldingnet.py | .py | 13,746 | 418 | import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .visualize import vis_points_3d
def knn(x: torch.tensor, k: int) -> torch.Tensor:
    """Indices of the k nearest neighbours of every point.

    Args:
        x: (batch_size, num_dims, num_points) coordinate/feature tensor.
        k: neighbours per point (the point itself is its own nearest
           neighbour, distance 0, barring ties).

    Returns:
        1-D LongTensor of length batch_size*num_points*k with indices offset
        into the flattened (batch_size*num_points) range, ready for the
        gather-style lookups in local_cov / local_maxpool / get_graph_feature.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    # -||a-b||^2 expanded via inner products: topk of the *negative* squared
    # distance picks the closest points.
    inner = -2 * torch.matmul(x.transpose(2, 1), x)
    xx = torch.sum(x ** 2, dim=1, keepdim=True)
    pairwise_distance = -xx - inner - xx.transpose(2, 1)
    idx = pairwise_distance.topk(k=k, dim=-1)[1]  # (batch_size, num_points, k)
    # get_device() == -1 means the tensor lives on the CPU
    if idx.get_device() == -1:
        idx_base = torch.arange(0, batch_size).view(-1, 1, 1) * num_points
    else:
        idx_base = (
            torch.arange(0, batch_size, device=idx.get_device()).view(-1, 1, 1)
            * num_points
        )
    idx = idx + idx_base
    idx = idx.view(-1)
    return idx
def local_cov(pts: torch.tensor, idx: torch.Tensor) -> torch.tensor:
    """Augment each point with a 3x3 outer product of its two nearest neighbours.

    Args:
        pts: (batch_size, 3, num_points) coordinates.
        idx: flattened kNN index tensor produced by ``knn``.

    Returns:
        (batch_size, 12, num_points): original xyz concatenated with the
        flattened outer product of neighbours 0 and 1 (neighbour 0 is,
        barring ties, the point itself).
    """
    batch_size = pts.size(0)
    num_points = pts.size(2)
    pts = pts.view(batch_size, -1, num_points)  # (batch_size, 3, num_points)
    _, num_dims, _ = pts.size()
    x = pts.transpose(2, 1).contiguous()  # (batch_size, num_points, 3)
    x = x.view(batch_size * num_points, -1)[idx, :]  # (batch_size*num_points*2, 3)
    x = x.view(batch_size, num_points, -1, num_dims)  # (batch_size, num_points, k, 3)
    x = torch.matmul(x[:, :, 0].unsqueeze(3), x[:, :, 1].unsqueeze(2))
    # (batch_size, num_points, 3, 1) * (batch_size, num_points, 1, 3)
    # -> (batch_size, num_points, 3, 3)
    # x = torch.matmul(x[:,:,1:].transpose(3, 2), x[:,:,1:])
    x = x.view(batch_size, num_points, 9).transpose(2, 1)  # (batch_size, 9, num_points)
    x = torch.cat((pts, x), dim=1)  # (batch_size, 12, num_points)
    return x
def local_maxpool(x: torch.tensor, idx: torch.Tensor) -> torch.tensor:
    """Channel-wise max over each point's k neighbours.

    Args:
        x: (batch_size, num_dims, num_points) feature tensor.
        idx: flattened kNN index tensor produced by ``knn``.

    Returns:
        (batch_size, num_points, num_dims) — note the transposed layout
        relative to the input.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    _, num_dims, _ = x.size()
    # (batch_size, num_points, num_dims)
    x = x.transpose(2, 1).contiguous()
    # (batch_size*n, num_dims) -> (batch_size*n*k, num_dims)
    x = x.view(batch_size * num_points, -1)[idx, :]
    # (batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, -1, num_dims)
    # (batch_size, num_points, num_dims)
    x, _ = torch.max(x, dim=2)
    return x
def get_graph_feature(x: torch.tensor, k=20, idx=None) -> torch.tensor:
    """Build DGCNN-style edge features (neighbour - point, point) pairs.

    Args:
        x: (batch_size, num_dims, num_points) feature tensor.
        k: neighbourhood size used when *idx* is not supplied.
        idx: optional precomputed flattened kNN indices (see ``knn``).

    Returns:
        (batch_size, 2*num_dims, num_points, k) edge-feature tensor.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    # (batch_size, num_dims, num_points)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        # flattened index tensor of length batch_size*num_points*k
        idx = knn(x, k=k)
    _, num_dims, _ = x.size()
    # (batch_size, num_points, num_dims)
    x = x.transpose(2, 1).contiguous()
    # (batch_size*n, num_dims) -> (batch_size*n*k, num_dims)
    feature = x.view(batch_size * num_points, -1)[idx, :]
    # (batch_size, num_points, k, num_dims)
    feature = feature.view(batch_size, num_points, k, num_dims)
    # (batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    # (batch_size, num_points, k, 2*num_dims) -> (batch_size, 2*num_dims, num_points, k)
    feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2)
    # (batch_size, 2*num_dims, num_points, k)
    return feature
class FoldingNetEncoder(nn.Module):
    """Graph-based FoldingNet encoder: point cloud -> global codeword.

    Maps a (batch, n_points, 3) cloud to a (batch, 1, feat_dims) codeword
    via local-covariance features, a two-round graph max-pool layer and a
    final point-wise MLP with global max pooling.
    """

    def __init__(self, n_points: int, feat_dims: int) -> None:
        super().__init__()
        self.n_points = n_points
        self.k = 16  # kNN graph neighbourhood size
        self.mlp1 = nn.Sequential(
            nn.Conv1d(12, 64, 1),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.ReLU(),
        )
        self.linear1 = nn.Linear(64, 64)
        self.conv1 = nn.Conv1d(64, 128, 1)
        self.linear2 = nn.Linear(128, 128)
        self.conv2 = nn.Conv1d(128, 1024, 1)
        self.mlp2 = nn.Sequential(
            nn.Conv1d(1024, feat_dims, 1),
            nn.ReLU(),
            nn.Conv1d(feat_dims, feat_dims, 1),
        )

    def graph_layer(self, x: torch.tensor, idx: int) -> torch.tensor:
        """Two rounds of neighbourhood max-pooling with projections: 64 -> 128 -> 1024."""
        h = local_maxpool(x, idx)
        h = self.linear1(h).transpose(2, 1)
        h = F.relu(self.conv1(h))
        h = local_maxpool(h, idx)
        h = self.linear2(h).transpose(2, 1)
        return self.conv2(h)

    def forward(self, pts: torch.tensor) -> torch.tensor:
        """Encode (batch, n_points, 3) clouds into (batch, 1, feat_dims) codewords."""
        pts = pts.transpose(2, 1)                      # -> (batch, 3, n)
        idx = knn(pts, k=self.k)
        feats = local_cov(pts, idx)                    # (batch, 12, n)
        feats = self.mlp1(feats)                       # (batch, 64, n)
        feats = self.graph_layer(feats, idx)           # (batch, 1024, n)
        feats = torch.max(feats, 2, keepdim=True)[0]   # global max-pool
        feats = self.mlp2(feats)                       # (batch, feat_dims, 1)
        return feats.transpose(2, 1)                   # (batch, 1, feat_dims)
class FoldingNetDecoder(nn.Module):
    """Two-stage folding decoder: codeword + 2-D/3-D grid -> point cloud.

    A fixed grid of ``self.m`` seed points (a 32x64 plane, a sphere or a
    Gaussian cloud, chosen by *shape*) is concatenated with the replicated
    codeword and folded twice by point-wise MLPs into 3-D space.

    NOTE(review): the sphere/gaussian grids are loaded from ``./grids/``
    relative to the current working directory at construction time —
    instantiating from another directory fails.
    """

    def __init__(self, feat_dims: int, shape="plane") -> None:
        super().__init__()
        self.m = 2048  # grid points: 32 * 64 meshgrid (sphere/gaussian grids match)
        self.shape = shape
        self.sphere = np.load(f"./grids/sphere_{self.m}.npy")
        self.gaussian = np.load("./grids/gaussian.npy")
        # [min, max, steps] per meshgrid axis
        self.meshgrid = [[-0.3, 0.3, 32], [-0.6, 0.6, 64]]
        # plane grid is 2-D, sphere/gaussian grids are 3-D, hence the
        # different first-layer input width
        if self.shape == "plane":
            self.folding1 = nn.Sequential(
                nn.Conv1d(feat_dims + 2, feat_dims, 1),
                nn.ReLU(),
                nn.Conv1d(feat_dims, feat_dims, 1),
                nn.ReLU(),
                nn.Conv1d(feat_dims, 3, 1),
            )
        else:
            self.folding1 = nn.Sequential(
                nn.Conv1d(feat_dims + 3, feat_dims, 1),
                nn.ReLU(),
                nn.Conv1d(feat_dims, feat_dims, 1),
                nn.ReLU(),
                nn.Conv1d(feat_dims, 3, 1),
            )
        self.folding2 = nn.Sequential(
            nn.Conv1d(feat_dims + 3, feat_dims, 1),
            nn.ReLU(),
            nn.Conv1d(feat_dims, feat_dims, 1),
            nn.ReLU(),
            nn.Conv1d(feat_dims, 3, 1),
        )

    def build_grid(self, batch_size: int) -> torch.tensor:
        """Return the seed grid repeated per batch item, shape (batch, m, 2 or 3)."""
        # assert self.shape == "plane", "shape should be 'plane'."
        if self.shape == "plane":
            x = np.linspace(*self.meshgrid[0])
            y = np.linspace(*self.meshgrid[1])
            points = np.array(list(itertools.product(x, y)))
        elif self.shape == "sphere":
            points = self.sphere
        elif self.shape == "gaussian":
            points = self.gaussian
        points = np.repeat(points[np.newaxis, ...], repeats=batch_size, axis=0)
        points = torch.tensor(points)
        return points.float()

    def forward(self, x: torch.tensor) -> torch.tensor:
        """Fold the grid twice; return (final points, intermediate points),
        each of shape (batch, m, 3)."""
        # replicate the codeword once per grid point: (batch, feat_dims, m)
        x = x.transpose(1, 2).repeat(1, 1, self.m)
        points = self.build_grid(x.shape[0]).transpose(1, 2)
        # vis_points_3d(points[0].transpose(0, 1), "meshgrid.png")
        # move the grid to the codeword's GPU when applicable
        if x.get_device() != -1:
            points = points.cuda(x.get_device())
        cat1 = torch.cat((x, points), dim=1)
        folding_result1 = self.folding1(cat1)
        # vis_points_3d(folding_result1[0].transpose(0, 1), "folding1.png")
        cat2 = torch.cat((x, folding_result1), dim=1)
        folding_result2 = self.folding2(cat2)
        # print(folding_result2.shape)
        # vis_points_3d(folding_result2[0].transpose(0, 1), "folding2.png")
        return folding_result2.transpose(1, 2), folding_result1.transpose(1, 2)
class FoldingNet(nn.Module):
    """Folding autoencoder: graph encoder + two-stage folding decoder."""

    def __init__(self, n_points: int, feat_dims: int, shape: str) -> None:
        super().__init__()
        self.encoder = FoldingNetEncoder(n_points, feat_dims)
        self.decoder = FoldingNetDecoder(feat_dims, shape=shape)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, input: torch.tensor):
        """Return (reconstruction, intermediate folding, codeword)."""
        codeword = self.encoder(input)
        reconstruction, intermediate = self.decoder(codeword)
        return reconstruction, intermediate, codeword

    def get_parameter(self):
        """Encoder and decoder parameters as one list (softmax has none)."""
        return list(self.encoder.parameters()) + list(self.decoder.parameters())
class SkipFoldingNetEncoder(nn.Module):
    """FoldingNet encoder with a skip connection.

    Identical trunk to ``FoldingNetEncoder``, but the shallow 64-d and deep
    1024-d per-point features are concatenated before global max pooling,
    so the final MLP consumes 1024 + 64 channels.
    """

    def __init__(self, n_points: int, feat_dims: int) -> None:
        super().__init__()
        self.n_points = n_points
        self.k = 16  # kNN graph neighbourhood size
        self.mlp1 = nn.Sequential(
            nn.Conv1d(12, 64, 1),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.ReLU(),
        )
        self.linear1 = nn.Linear(64, 64)
        self.conv1 = nn.Conv1d(64, 128, 1)
        self.linear2 = nn.Linear(128, 128)
        self.conv2 = nn.Conv1d(128, 1024, 1)
        self.mlp2 = nn.Sequential(
            nn.Conv1d(1024 + 64, feat_dims, 1),
            nn.ReLU(),
            nn.Conv1d(feat_dims, feat_dims, 1),
        )

    def graph_layer(self, x: torch.tensor, idx: int) -> torch.tensor:
        """Two rounds of neighbourhood max-pooling with projections: 64 -> 128 -> 1024."""
        h = local_maxpool(x, idx)
        h = self.linear1(h).transpose(2, 1)
        h = F.relu(self.conv1(h))
        h = local_maxpool(h, idx)
        h = self.linear2(h).transpose(2, 1)
        return self.conv2(h)

    def forward(self, pts: torch.tensor) -> torch.tensor:
        """Encode (batch, n_points, 3) -> (batch, 1, feat_dims) codeword."""
        pts = pts.transpose(2, 1)
        idx = knn(pts, k=self.k)
        shallow = self.mlp1(local_cov(pts, idx))   # (batch, 64, n)
        deep = self.graph_layer(shallow, idx)      # (batch, 1024, n)
        fused = torch.cat([shallow, deep], 1)      # skip connection
        pooled = torch.max(fused, 2, keepdim=True)[0]
        return self.mlp2(pooled).transpose(2, 1)
class SkipFoldingNet(nn.Module):
    """Autoencoder pairing the skip-connected encoder with the folding decoder."""

    def __init__(self, n_points: int, feat_dims: int, shape: str) -> None:
        super().__init__()
        self.encoder = SkipFoldingNetEncoder(n_points, feat_dims)
        self.decoder = FoldingNetDecoder(feat_dims, shape=shape)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, input: torch.tensor):
        """Return (reconstruction, intermediate folding, codeword)."""
        codeword = self.encoder(input)
        reconstruction, intermediate = self.decoder(codeword)
        return reconstruction, intermediate, codeword

    def get_parameter(self):
        """Encoder and decoder parameters as one list (softmax has none)."""
        return list(self.encoder.parameters()) + list(self.decoder.parameters())
class SkipVariationalEncoder(nn.Module):
    """Skip-connected graph encoder with a Gaussian latent head.

    Same trunk as ``SkipFoldingNetEncoder``, but the pooled 1088-channel
    feature is mapped by two separate 1x1 convolutions to the mean and
    variance parameters of the latent distribution.
    """

    def __init__(self, n_points: int, feat_dims: int) -> None:
        super().__init__()
        self.n_points = n_points
        self.k = 16  # kNN graph neighbourhood size
        self.feat_dims = feat_dims
        self.mlp1 = nn.Sequential(
            nn.Conv1d(12, 64, 1),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.ReLU(),
        )
        self.linear1 = nn.Linear(64, 64)
        self.conv1 = nn.Conv1d(64, 128, 1)
        self.linear2 = nn.Linear(128, 128)
        self.conv2 = nn.Conv1d(128, 1024, 1)
        self.fc_mu = nn.Conv1d(1024 + 64, feat_dims, 1)
        self.fc_var = nn.Conv1d(1024 + 64, feat_dims, 1)

    def graph_layer(self, x: torch.tensor, idx: int) -> torch.tensor:
        """Two rounds of neighbourhood max-pooling with projections: 64 -> 128 -> 1024."""
        h = local_maxpool(x, idx)
        h = self.linear1(h).transpose(2, 1)
        h = F.relu(self.conv1(h))
        h = local_maxpool(h, idx)
        h = self.linear2(h).transpose(2, 1)
        return self.conv2(h)

    def forward(self, pts: torch.tensor) -> torch.tensor:
        """Return the (mu, sigma) latent heads, each (batch, feat_dims, 1)."""
        pts = pts.transpose(2, 1)
        idx = knn(pts, k=self.k)
        shallow = self.mlp1(local_cov(pts, idx))   # (batch, 64, n)
        deep = self.graph_layer(shallow, idx)      # (batch, 1024, n)
        fused = torch.cat([shallow, deep], 1)      # skip connection
        pooled = torch.max(fused, 2, keepdim=True)[0]
        mu = self.fc_mu(pooled)
        sigma = self.fc_var(pooled)
        return mu, sigma
class SkipValiationalFoldingNet(nn.Module):
    """Variational folding autoencoder (skip-connected encoder + folding decoder).

    The class-name typo ("Valiational") is kept for caller/checkpoint
    compatibility.
    """

    def __init__(self, n_points: int, feat_dims: int, shape: str) -> None:
        super().__init__()
        self.encoder = SkipVariationalEncoder(n_points, feat_dims)
        self.decoder = FoldingNetDecoder(feat_dims, shape=shape)
        self.softmax = nn.Softmax(dim=2)  # currently unused in forward

    def sample_z(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        """Reparameterization trick: z = mu + eps * exp(0.5 * logvar).

        NOTE(review): the second argument is used here as a *log-variance*,
        but ``forward`` feeds it the raw ``fc_var`` output under the name
        ``sigma`` and also returns it as such; the KLD code in helper.py
        appears to square the same tensor as if it were a standard
        deviation. Confirm which convention is intended — the two usages
        are inconsistent as written.
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: torch.tensor):
        """Return (reconstruction, first folding, mu, sigma)."""
        mu, sigma = self.encoder(input)
        mu = mu.transpose(2, 1)
        sigma = sigma.transpose(2, 1)
        # feature = self.softmax(feature)
        feature = self.sample_z(mu, sigma)
        folding2, folding1 = self.decoder(feature)
        return folding2, folding1, mu, sigma

    def get_parameter(self):
        """Encoder and decoder parameters as one list (softmax has none)."""
        return list(self.encoder.parameters()) + list(self.decoder.parameters())
def weights_init(m):
    """DCGAN-style layer initializer, applied via ``model.apply(weights_init)``.

    Conv* layers: weights ~ U(0, 0.02) and bias zeroed; BatchNorm* layers:
    weights ~ N(1, 0.02) and bias zeroed. Other modules are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.uniform_(0.0, 0.02)
        # conv layers built with bias=False have m.bias is None — the
        # original crashed on them
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find("BatchNorm") != -1:
        m.weight.data.normal_(1.0, 0.02)
        # affine=False batch norms carry no learnable bias
        if m.bias is not None:
            m.bias.data.fill_(0)
| Python |
3D | llien30/point_cloud_anomaly_detection | utils/make_sphere.py | .py | 494 | 27 | import random
from math import cos, pi, sin
import numpy as np
def make_sphere(N: int) -> None:
    """Sample N points on the unit sphere and save them to ``sphere_<N>.npy``.

    NOTE(review): theta and phi are both drawn uniformly from [0, 2*pi), so
    the points lie exactly on the sphere but are *not* uniformly distributed
    over its surface — confirm this is the intended grid before reuse.
    """
    samples = []
    for _ in range(N):
        theta = 2 * pi * random.random()
        phi = 2 * pi * random.random()
        # spherical -> Cartesian conversion on the unit sphere
        samples.append([sin(phi) * cos(theta), sin(phi) * sin(theta), cos(phi)])
    np.save(f"sphere_{N}.npy", np.array(samples))
def main() -> None:
    # Generate the 5120-point sphere grid (saved as sphere_5120.npy).
    make_sphere(5120)
if __name__ == "__main__":
    main()
| Python |
3D | kuangxh9/SuperWater | organize_pdb_dataset.py | .py | 3,204 | 72 | import os
import shutil
from collections import defaultdict
from tqdm import tqdm
from argparse import ArgumentParser
# ---------------- CLI ----------------
parser = ArgumentParser(description="Process PDB files and organize dataset.")
parser.add_argument("--raw_data", type=str, required=True, help="Name of the dataset folder containing PDB files.")
parser.add_argument("--data_root", type=str, default="data", help="Root directory where dataset folder is located.")
parser.add_argument("--output_dir", type=str, default="test_data", help="Directory to store processed test data.")
parser.add_argument("--splits_path", type=str, default="data/splits", help="Directory to save splits.")
parser.add_argument("--dummy_water_dir", type=str, default="data/dummy_water", help="Directory containing dummy water files.")
parser.add_argument("--logs_dir", type=str, default="logs", help="Directory to save logs.")
args = parser.parse_args()

# Resolve input/output locations and create the output directories.
# NOTE(review): output always lands under 'data/<output_dir>' regardless of
# --data_root -- confirm that asymmetry is intended.
data_path = os.path.join(args.data_root, args.raw_data)
test_data_path = os.path.join('data', args.output_dir)
dummy_water_path = args.dummy_water_dir
logs_path = args.logs_dir
splits_path = args.splits_path
os.makedirs(test_data_path, exist_ok=True)
os.makedirs(logs_path, exist_ok=True)
os.makedirs(splits_path, exist_ok=True)

# Group input PDB files by the first four characters of the file stem
# (the 4-char PDB id); later names with a colliding id are logged as duplicates.
pdb_files = [f for f in os.listdir(data_path) if f.endswith(".pdb")]
pdb_id_dict = {}                     # original stem -> 4-char id (first occurrence only)
duplicate_truncate_pdb_id = []       # stems whose 4-char id collided with an earlier file
truncate_map = defaultdict(list)     # 4-char id -> all original stems that truncate to it
successful_pdb_ids = []
for pdb_file in tqdm(pdb_files, desc="Processing PDB files"):
    original_pdb_id = pdb_file.replace(".pdb", "")
    truncated_pdb_id = original_pdb_id[:4]
    if truncated_pdb_id in truncate_map:
        duplicate_truncate_pdb_id.append(original_pdb_id)
    else:
        pdb_id_dict[original_pdb_id] = truncated_pdb_id
    truncate_map[truncated_pdb_id].append(original_pdb_id)

# Keep only ids that truncated uniquely; copy the protein plus dummy-water
# placeholder files into one folder per id, renamed to the pipeline's layout.
unique_pdb_id_dict = {v[0]: k for k, v in truncate_map.items() if len(v) == 1}
for original_pdb_id, truncated_pdb_id in tqdm(unique_pdb_id_dict.items(), desc="Copying and renaming PDB files"):
    dest_folder = os.path.join(test_data_path, truncated_pdb_id)
    os.makedirs(dest_folder, exist_ok=True)
    successful_pdb_ids.append(truncated_pdb_id)
    src_file = os.path.join(data_path, f"{original_pdb_id}.pdb")
    dest_file = os.path.join(dest_folder, f"{truncated_pdb_id}_protein_processed.pdb")
    shutil.copy(src_file, dest_file)
    for water_ext in ["mol2", "pdb"]:
        water_src = os.path.join(dummy_water_path, f"_water.{water_ext}")
        water_dest = os.path.join(dest_folder, f"{truncated_pdb_id}_water.{water_ext}")
        shutil.copy(water_src, water_dest)

# Persist the duplicate log and the split file listing processed ids.
log_file_path = os.path.join(logs_path, "duplicate_truncate_pdb_id.txt")
with open(log_file_path, "w") as log_file:
    for dup_id in duplicate_truncate_pdb_id:
        log_file.write(f"{dup_id}\n")
split_file_path = os.path.join(splits_path, f"{args.output_dir}.txt")
with open(split_file_path, "w") as split_file:
    for pdb_id in successful_pdb_ids:
        split_file.write(f"{pdb_id}\n")
print("Processing completed.")
print(f"Successful saved test PDB IDs to {split_file_path}.")
print(f"Duplicate truncated IDs saved to {log_file_path}.")
| Python |
3D | kuangxh9/SuperWater | train.py | .py | 6,739 | 157 | import copy
import math
import os
from functools import partial
import numpy as np
import random
import wandb
import torch
# Avoid "too many open files" when many dataloader workers exchange tensors.
torch.multiprocessing.set_sharing_strategy('file_system')
import resource
# Raise the soft open-file limit to 64k (workers hold many file descriptors).
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (64000, rlimit[1]))
import yaml
from utils.diffusion_utils import t_to_sigma as t_to_sigma_compl
from datasets.pdbbind import construct_loader
from utils.parsing import parse_train_args
from utils.training import train_epoch, test_epoch, loss_function
from utils.utils import save_yaml_file, get_optimizer_and_scheduler, get_model, ExponentialMovingAverage
def train(args, model, optimizer, scheduler, ema_weights, train_loader, val_loader, infer_loader, t_to_sigma, run_dir):
    """Main training loop: train + validate each epoch, track EMA weights, checkpoint.

    Saves best_model.pt / best_ema_model.pt when validation loss improves, and
    last_model.pt (model + optimizer + EMA state) every epoch.
    Reads the module-level `device` global.
    NOTE(review): `infer_loader` is accepted but never used in this function.
    """
    best_val_loss = math.inf
    best_val_inference_value = math.inf if args.inference_earlystop_goal == 'min' else 0
    best_epoch = 0
    best_val_inference_epoch = 0
    loss_fn = partial(loss_function, tr_weight=args.tr_weight)
    print("Starting training...")
    for epoch in range(args.n_epochs):
        if epoch % 5 == 0: print("Run name: ", args.run_name)
        logs = {}
        train_losses = train_epoch(model, train_loader, optimizer, device, t_to_sigma, loss_fn, ema_weights)
        print("Epoch {}: Training loss {:.4f} tr {:.4f} "
              .format(epoch, train_losses['loss'], train_losses['tr_loss']))
        # Validate with EMA weights when requested; raw weights are restored after.
        ema_weights.store(model.parameters())
        if args.use_ema: ema_weights.copy_to(model.parameters())  # load ema parameters into model for running validation and inference
        val_losses = test_epoch(model, val_loader, device, t_to_sigma, loss_fn, args.test_sigma_intervals)
        print("Epoch {}: Validation loss {:.4f} tr {:.4f} "
              .format(epoch, val_losses['loss'], val_losses['tr_loss']))
        if not args.use_ema: ema_weights.copy_to(model.parameters())
        # Snapshot EMA weights (model currently holds them in either branch above).
        ema_state_dict = copy.deepcopy(model.module.state_dict() if device.type == 'cuda' else model.state_dict())
        ema_weights.restore(model.parameters())
        if args.wandb:
            logs.update({'train_' + k: v for k, v in train_losses.items()})
            logs.update({'val_' + k: v for k, v in val_losses.items()})
            logs['current_lr'] = optimizer.param_groups[0]['lr']
            wandb.log(logs, step=epoch + 1)
        state_dict = model.module.state_dict() if device.type == 'cuda' else model.state_dict()
        if val_losses['loss'] <= best_val_loss:
            best_val_loss = val_losses['loss']
            best_epoch = epoch
            torch.save(state_dict, os.path.join(run_dir, 'best_model.pt'))
            torch.save(ema_state_dict, os.path.join(run_dir, 'best_ema_model.pt'))
        if scheduler:
            # NOTE(review): best_val_inference_value is never updated here, so
            # with val_inference_freq set the scheduler always steps on its
            # initial value -- confirm this is intended.
            if args.val_inference_freq is not None:
                scheduler.step(best_val_inference_value)
            else:
                scheduler.step(val_losses['loss'])
        # Full resumable checkpoint, overwritten every epoch.
        torch.save({
            'epoch': epoch,
            'model': state_dict,
            'optimizer': optimizer.state_dict(),
            'ema_weights': ema_weights.state_dict(),
        }, os.path.join(run_dir, 'last_model.pt'))
    print("Best Validation Loss {} on Epoch {}".format(best_val_loss, best_epoch))
def set_seed(seed: int = 42) -> None:
    """Seed every RNG source (numpy, random, torch CPU + CUDA) and pin cuDNN."""
    for seeder in (np.random.seed, random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    # cuDNN: deterministic kernels, autotuner off (reproducibility over speed).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Pin Python's hash randomisation as well.
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f"Random seed set as {seed}")
def main_function():
    """Entry point: parse args (+ optional YAML config), build loaders/model/
    optimizer, optionally restore a checkpoint, then run the training loop.

    Reads the module-level `device` global set in the __main__ block.
    """
    set_seed(42)
    args = parse_train_args()
    if args.config:
        # Values from the YAML config override/extend the CLI namespace;
        # list values are appended to the existing CLI list.
        config_dict = yaml.load(args.config, Loader=yaml.FullLoader)
        arg_dict = args.__dict__
        for key, value in config_dict.items():
            if isinstance(value, list):
                for v in value:
                    arg_dict[key].append(v)
            else:
                arg_dict[key] = value
        args.config = args.config.name
    assert (args.inference_earlystop_goal == 'max' or args.inference_earlystop_goal == 'min')
    if args.val_inference_freq is not None and args.scheduler is not None:
        assert (args.scheduler_patience > args.val_inference_freq)  # otherwise we will just stop training after args.scheduler_patience epochs
    if args.cudnn_benchmark:
        torch.backends.cudnn.benchmark = True
    # construct loader
    t_to_sigma = partial(t_to_sigma_compl, args=args)
    train_loader, val_loader, infer_loader = construct_loader(args, t_to_sigma)
    model = get_model(args, device, t_to_sigma=t_to_sigma)
    optimizer, scheduler = get_optimizer_and_scheduler(args, model, scheduler_mode=args.inference_earlystop_goal if args.val_inference_freq is not None else 'min')
    ema_weights = ExponentialMovingAverage(model.parameters(), decay=args.ema_rate)
    if args.restart_dir:
        try:
            # NOTE(review): `dict` shadows the builtin in this scope (kept as-is).
            dict = torch.load(f'{args.restart_dir}/last_model.pt', map_location=torch.device('cpu'))
            if args.restart_lr is not None: dict['optimizer']['param_groups'][0]['lr'] = args.restart_lr
            optimizer.load_state_dict(dict['optimizer'])
            model.module.load_state_dict(dict['model'], strict=True)
            if hasattr(args, 'ema_rate'):
                ema_weights.load_state_dict(dict['ema_weights'], device=device)
            print("Restarting from epoch", dict['epoch'])
        except Exception as e:
            # Fall back to weights-only checkpoint (no optimizer/EMA state).
            print("Exception", e)
            dict = torch.load(f'{args.restart_dir}/best_model.pt', map_location=torch.device('cpu'))
            model.module.load_state_dict(dict, strict=True)
            print("Due to exception had to take the best epoch and no optimiser")
    numel = sum([p.numel() for p in model.parameters()])
    print('Model with', numel, 'parameters')
    if args.wandb:
        wandb.init(
            entity='xiaohan-kuang',
            settings=wandb.Settings(start_method="fork"),
            project=args.project,
            name=args.run_name,
            config=args
        )
        wandb.log({'numel': numel})
    # record parameters
    run_dir = os.path.join(args.log_dir, args.run_name)
    yaml_file_name = os.path.join(run_dir, 'model_parameters.yml')
    save_yaml_file(yaml_file_name, args.__dict__)
    args.device = device
    train(args, model, optimizer, scheduler, ema_weights, train_loader, val_loader, infer_loader, t_to_sigma, run_dir)
if __name__ == '__main__':
    # Module-level `device` is read by train() and main_function() above.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    main_function()
3D | kuangxh9/SuperWater | inference_water_pos.py | .py | 11,335 | 265 | import gc
import math
import os
import numpy as np
import random
import shutil
import torch.nn as nn
from argparse import Namespace, ArgumentParser, FileType
import torch.nn.functional as F
from functools import partial
import wandb
import torch
import time
from sklearn.metrics import roc_auc_score
from torch_geometric.loader import DataListLoader, DataLoader
from tqdm import tqdm
from datasets.pdbbind import PDBBind, NoiseTransform
from confidence.dataset import ConfidenceDataset
from utils.training import AverageMeter
from scipy.spatial.distance import cdist
torch.multiprocessing.set_sharing_strategy('file_system')
import yaml
from utils.utils import save_yaml_file, get_optimizer_and_scheduler, get_model
from utils.diffusion_utils import t_to_sigma as t_to_sigma_compl
from confidence.dataset import get_args
from sklearn.cluster import DBSCAN
from utils.cluster_centroid import find_centroids
from utils.find_water_pos import find_real_water_pos
from utils.nearest_point_dist import get_nearest_point_distances
from utils.parsing import parse_inference_args
# CLI args are parsed at import time; several function defaults below read
# from this module-level `args`.
args = parse_inference_args()
total_sampling_ratio = args.water_ratio * args.resample_steps
if args.config:
    # Values from the YAML config override/extend the CLI namespace;
    # list values are appended to the existing CLI list.
    config_dict = yaml.load(args.config, Loader=yaml.FullLoader)
    arg_dict = args.__dict__
    for key, value in config_dict.items():
        if isinstance(value, list):
            for v in value:
                arg_dict[key].append(v)
        else:
            arg_dict[key] = value
    args.config = args.config.name
assert (args.main_metric_goal == 'max' or args.main_metric_goal == 'min')
# Output folder name encodes the resample ratio and probability cap.
save_pos_path = "inference_out/" + f"inferenced_pos_rr{total_sampling_ratio}_cap{args.cap}" + "/"
os.makedirs(save_pos_path, exist_ok=True)
torch.manual_seed(42)
def convert_txt_to_pdb(txt_file_path: str, output_pdb_path: str):
    """Rewrite a whitespace-separated xyz coordinate file as water-oxygen HETATM records."""
    with open(txt_file_path, 'r') as src:
        raw_lines = src.readlines()
    records = []
    for serial, raw in enumerate(raw_lines, start=1):
        vals = [float(tok) for tok in raw.strip().split()]
        records.append(
            f'HETATM{serial:>5} O HOH A{1:>4} {vals[0]:8.3f}{vals[1]:8.3f}{vals[2]:8.3f} 1.00 0.00 O\n'
        )
    with open(output_pdb_path, 'w') as dst:
        dst.writelines(records)
    print(f"Successfully saved PDB file to: {output_pdb_path}")
def test_epoch(model, loader, mad_prediction, filter=True, use_sigmoid=args.use_sigmoid, quiet=False):
    """Run confidence-model inference over `loader`, cluster predicted water
    positions into centroids and (optionally) write them as txt/pdb files.

    NOTE(review): `use_sigmoid`'s default is evaluated from module-level `args`
    at import time; `mad_prediction`, `filter` (shadows the builtin) and
    `quiet` are unused. Reads module-level `args`, `device`, `save_pos_path`.
    """
    model.eval()
    log_data = []  # (pdb_name, seconds) per complex, written to a log at the end
    log_dir = "logs"
    os.makedirs(log_dir, exist_ok=True)
    total_ratio = args.water_ratio * args.resample_steps
    for data in tqdm(loader, total=len(loader)):
        start_time = time.time()
        pdb_name = data[0].name
        try:
            with torch.no_grad():
                pred = model(data)
            labels = torch.cat([graph.y for graph in data]).to(device)  # NOTE(review): unused
            if use_sigmoid:
                probabilities = torch.sigmoid(pred)
                pred_labels = (probabilities > args.cap).float()
            else:
                # Raw outputs are treated as distances/scores: clamp to [0, 1]
                # and invert so larger means more confident.
                probabilities = pred
                probabilities[probabilities > 1] = 1
                probabilities = 1 - probabilities
                pred_labels = (probabilities > args.cap).float()
            positions = torch.cat([graph['ligand'].pos for graph in data]).to(device)
            # Undo the centering applied during preprocessing.
            positions_adjusted = positions.cpu().numpy() + data[0].original_center.numpy()
            num_sampled_positions = len(positions_adjusted)
            try:
                centroids = find_centroids(positions_adjusted,
                                           probabilities.cpu().numpy(),
                                           threshold=args.cap,
                                           cluster_distance=1.52,
                                           use_weighted_avg=True,
                                           clash_distance=2.2)
                print('centroids: ', len(centroids))
                # NOTE(review): if centroids is None, len() above already raised
                # (caught by the enclosing except), so this guard cannot fire.
                if centroids is None:
                    raise Exception(f"Centroid is None. Cannot process PDB {data[0].name}")
                # save file
                try:
                    if args.save_pos:
                        pdb_name = data[0].name
                        pdb_folder = os.path.join(save_pos_path, pdb_name)
                        os.makedirs(pdb_folder, exist_ok=True)
                        # Raw sampled positions with per-position confidence.
                        filtered_file_path = os.path.join(pdb_folder, f"{pdb_name}_filtered.txt")
                        filtered_probabilities_reshaped = probabilities.reshape(-1, 1).cpu().numpy()
                        combined_pos_prob = np.hstack((positions_adjusted, filtered_probabilities_reshaped))
                        np.savetxt(filtered_file_path, combined_pos_prob, fmt='%.3f')
                        # Clustered centroids: plain txt plus a PDB of water oxygens.
                        save_txt_path = os.path.join(pdb_folder, f'{pdb_name}_centroid.txt')
                        np.savetxt(save_txt_path, centroids, fmt='%.8f')
                        print(f"Saved centroids for {pdb_name} to {save_txt_path}")
                        save_pdb_path = os.path.join(pdb_folder, f'{pdb_name}_centroid.pdb')
                        convert_txt_to_pdb(save_txt_path, save_pdb_path)
                except Exception as e:
                    print('Cannot save pdb: ', data[0].name, e)
            except Exception as e:
                print(f"An error occurred on {data[0].name}", e)
                continue
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                continue
            else:
                raise e
        end_time = time.time()
        processing_time = end_time - start_time
        log_data.append((pdb_name, f"{processing_time:.2f}"))
    with open(f"logs/inference_log_rr{total_ratio}.txt", "w") as log_file:
        for record in log_data:
            log_file.write(f"{record[0]} {record[1]}\n")
def evalulation(args, model, val_loader, run_dir):
    """Run one confidence-model inference pass over the test loader.

    NOTE(review): name is a typo for "evaluation" -- kept because the
    __main__ block below calls it by this name. `run_dir` is unused.
    """
    print("Starting testing...")
    test_epoch(model, val_loader, args.mad_prediction)
def construct_loader_origin(args_confidence, args, t_to_sigma):
    """Build the (non-shuffled) PDBBind test loader.

    `args` carries the score model's preprocessing parameters, while
    `args_confidence` carries this confidence run's paths/batch sizes.
    NOTE(review): `t_to_sigma` is accepted but unused here.
    """
    confi_common_args = {'transform': None, 'root': args_confidence.data_dir, 'limit_complexes': args.limit_complexes,
                         'receptor_radius': args.receptor_radius,
                         'c_alpha_max_neighbors': args.c_alpha_max_neighbors,
                         'remove_hs': args.remove_hs, 'max_lig_size': args.max_lig_size,
                         'popsize': args.matching_popsize, 'maxiter': args.matching_maxiter,
                         'num_workers': args.num_workers, 'all_atoms': args.all_atoms,
                         'atom_radius': args.atom_radius, 'atom_max_neighbors': args.atom_max_neighbors,
                         'esm_embeddings_path': args_confidence.esm_embeddings_path}
    print('esm_embeddings_path:', args_confidence.esm_embeddings_path)
    test_dataset = PDBBind(cache_path=args.cache_path, split_path=args_confidence.split_test, keep_original=True,
                           **confi_common_args)
    loader_class = DataLoader
    # Order must stay fixed so results can be matched back to complexes.
    test_loader = loader_class(dataset=test_dataset, batch_size=args_confidence.batch_size_preprocessing,
                               num_workers=args_confidence.num_workers, shuffle=False, pin_memory=args.pin_memory)
    return test_loader
def construct_loader_confidence(args, device):
    """Build the DataLoader used for confidence-model inference.

    Runs the score model's loader pipeline first (construct_loader_origin),
    then wraps it in a ConfidenceDataset which generates the sampled water
    positions, and finally batches that dataset.

    Fix: the original kept a never-set `exception_flag` whose raise referenced
    an undefined name `e` (itself a NameError if it ever fired); that dead,
    broken code path has been removed.
    """
    common_args = {'cache_path': args.cache_path, 'original_model_dir': args.original_model_dir, 'device': device,
                   'inference_steps': args.inference_steps, 'samples_per_complex': args.samples_per_complex,
                   'limit_complexes': args.limit_complexes, 'all_atoms': args.all_atoms, 'balance': args.balance,
                   'mad_classification_cutoff': args.mad_classification_cutoff,
                   'use_original_model_cache': args.use_original_model_cache,
                   'cache_creation_id': args.cache_creation_id, "cache_ids_to_combine": args.cache_ids_to_combine,
                   "model_ckpt": args.ckpt,
                   "running_mode": args.running_mode,
                   "water_ratio": args.water_ratio,
                   "resample_steps": args.resample_steps,
                   "save_visualization": args.save_visualization}
    # DataListLoader is required for multi-GPU (DataParallel) batching.
    loader_class = DataListLoader if torch.cuda.is_available() else DataLoader
    # construct original loader
    original_model_args = get_args(args.original_model_dir)
    t_to_sigma = partial(t_to_sigma_compl, args=original_model_args)
    test_loader = construct_loader_origin(args, original_model_args, t_to_sigma)
    test_dataset = ConfidenceDataset(loader=test_loader, split=os.path.splitext(os.path.basename(args.split_test))[0],
                                     args=args, **common_args)
    test_loader = loader_class(dataset=test_dataset, batch_size=args.batch_size, shuffle=False)
    return test_loader
def set_seed(seed: int = 42) -> None:
    """Make numpy / random / torch (CPU and CUDA) deterministic for this process."""
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernels and disable the autotuner.
    cudnn = torch.backends.cudnn
    cudnn.deterministic, cudnn.benchmark = True, False
    # Fix Python's hash randomisation as well.
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f"Random seed set as {seed}")
if __name__ == '__main__':
    set_seed(42)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Load the hyper-parameters both models were trained with.
    with open(f'{args.original_model_dir}/model_parameters.yml') as f:
        score_model_args = Namespace(**yaml.full_load(f))
    with open(f'{args.confidence_dir}/model_parameters.yml') as f:
        confidence_args = Namespace(**yaml.full_load(f))
    test_loader = construct_loader_confidence(args, device)
    t_to_sigma = partial(t_to_sigma_compl, args=score_model_args)
    model = get_model(confidence_args, device, t_to_sigma=None, confidence_mode=True)
    # Load state_dict
    state_dict = torch.load(f'{args.confidence_dir}/best_model.pt', map_location='cpu')
    # Adjust for DataParallel wrapping
    new_state_dict = {'module.' + k: v for k, v in state_dict.items()}
    model.load_state_dict(new_state_dict, strict=True)
    numel = sum([p.numel() for p in model.parameters()])
    print('Loading trained confidence model with', numel, 'parameters')
    if args.wandb:
        wandb.init(
            entity='Xiaohan Kuang',
            settings=wandb.Settings(start_method="fork"),
            project=args.project,
            name=args.run_name,
            config=args
        )
        wandb.log({'numel': numel})
    # record parameters
    run_dir = os.path.join(args.log_dir, args.run_name)
    yaml_file_name = os.path.join(run_dir, 'model_parameters.yml')
    save_yaml_file(yaml_file_name, args.__dict__)
    args.device = device
    evalulation(args, model, test_loader, run_dir)
3D | kuangxh9/SuperWater | check_gpu.py | .py | 532 | 17 | import torch
def check_cuda_devices():
    """Print a summary of the available CUDA devices (name + total memory)."""
    if not torch.cuda.is_available():
        print("CUDA is not available.")
        return
    num_gpus = torch.cuda.device_count()
    print(f"Number of CUDA devices available: {num_gpus}")
    for idx in range(num_gpus):
        total_gib = torch.cuda.get_device_properties(idx).total_memory / 1024**3
        print(f"Device {idx}: {torch.cuda.get_device_name(idx)} with {total_gib:.2f} GB memory")


if __name__ == "__main__":
    check_cuda_devices()
3D | kuangxh9/SuperWater | utils/training.py | .py | 6,382 | 156 | import copy
import numpy as np
from torch_geometric.loader import DataLoader
from tqdm import tqdm
from confidence.dataset import ListDataset
from utils import so3, torus
from utils.sampling import sampling, randomize_position_multiple
import torch
from utils.diffusion_utils import get_t_schedule
from utils.min_dist import match_points_and_get_distances
def loss_function(tr_pred, expand_tr_sigma, data, t_to_sigma, device, tr_weight=1, apply_mean=True):
    """Sigma-weighted translation score-matching loss.

    Returns (weighted loss, tr_loss, tr_base_loss); the last two are detached.
    NOTE(review): `t_to_sigma` is accepted but unused here.
    """
    reduce_dims = (0, 1) if apply_mean else 1
    # translation component: targets live on the individual graphs when batched on GPU
    if device.type == 'cuda':
        tr_score = torch.cat([d.tr_score for d in data], dim=0)
    else:
        tr_score = data.tr_score
    sigma_col = expand_tr_sigma.unsqueeze(-1).cpu()
    sq_err = (tr_pred.cpu() - tr_score) ** 2
    tr_loss = (sq_err * (sigma_col ** 2 + 1e-6)).mean(dim=reduce_dims)
    if torch.isnan(tr_loss).any():
        print("NaN found in loss")
        tr_loss = torch.nan_to_num(tr_loss, nan=0.0)
    tr_base_loss = (tr_score ** 2 * sigma_col ** 2).mean(dim=reduce_dims).detach()
    loss = tr_loss * tr_weight
    return loss, tr_loss.detach(), tr_base_loss
class AverageMeter():
    """Running sums of named scalar metrics.

    With intervals == 1 a single average per metric is kept; with intervals > 1
    per-interval sums are accumulated via index_add_ and summary() emits keys
    of the form 'int<i>_<metric>'.
    """

    def __init__(self, types, unpooled_metrics=False, intervals=1):
        self.types = types
        self.intervals = intervals
        self.count = 0 if intervals == 1 else torch.zeros(len(types), intervals)
        self.acc = {t: torch.zeros(intervals) for t in types}
        self.unpooled_metrics = unpooled_metrics

    def add(self, vals, interval_idx=None):
        """Accumulate one batch of values (one entry per metric in `types`)."""
        if self.intervals == 1:
            first = vals[0]
            self.count += 1 if first.dim() == 0 else len(first)
            for name, v in zip(self.types, vals):
                self.acc[name] += v.sum() if self.unpooled_metrics else v
        else:
            for pos, v in enumerate(vals):
                self.count[pos].index_add_(0, interval_idx[pos], torch.ones(len(v)))
                # All-zero vectors are treated as "no signal" and skipped.
                if not torch.allclose(v, torch.tensor(0.0)):
                    self.acc[self.types[pos]].index_add_(0, interval_idx[pos], v)

    def summary(self):
        """Return per-metric averages (flat dict; per-interval keys when intervals > 1)."""
        if self.intervals == 1:
            return {name: total.item() / self.count for name, total in self.acc.items()}
        out = {}
        for i in range(self.intervals):
            for pos, name in enumerate(self.types):
                out['int' + str(i) + '_' + name] = (
                    list(self.acc.values())[pos][i] / self.count[pos][i]).item()
        return out
def train_epoch(model, loader, optimizer, device, t_to_sigma, loss_fn, ema_weigths):
    """One optimisation pass over `loader`; returns epoch-averaged metrics.

    OOM and torch_cluster "Input mismatch" batches are skipped instead of
    aborting the epoch. EMA weights are updated after every optimizer step.
    NOTE(review): parameter name `ema_weigths` (sic) is kept -- it is
    positional for all callers.
    """
    model.train()
    meter = AverageMeter(['loss', 'tr_loss', 'tr_base_loss'])
    for data in tqdm(loader, total=len(loader)):
        if device.type == 'cuda' and len(data) == 1 or device.type == 'cpu' and data.num_graphs == 1:
            print("Skipping batch of size 1 since otherwise batchnorm would not work.")
            # Fix: the message promised a skip, but the batch was still processed.
            continue
        optimizer.zero_grad()
        try:
            tr_pred, expand_tr_sigma, expand_batch_idx = model(data)
            loss, tr_loss, tr_base_loss = \
                loss_fn(tr_pred, expand_tr_sigma, data=data, t_to_sigma=t_to_sigma, device=device)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            ema_weigths.update(model.parameters())
            meter.add([loss.cpu().detach(), tr_loss, tr_base_loss])
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                continue
            elif 'Input mismatch' in str(e):
                print('| WARNING: weird torch_cluster error, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                continue
            else:
                raise e
    return meter.summary()
def test_epoch(model, loader, device, t_to_sigma, loss_fn, test_sigma_intervals=False):
    """Evaluate `model` over `loader` without gradients; returns averaged metrics.

    When `test_sigma_intervals` is truthy, losses are additionally bucketed
    into 10 diffusion-time intervals ('int<i>_<metric>' keys in the result).
    """
    model.eval()
    meter = AverageMeter(['loss', 'tr_loss', 'tr_base_loss'],
                         unpooled_metrics=True)
    if test_sigma_intervals:
        meter_all = AverageMeter(
            ['loss', 'tr_loss', 'tr_base_loss'],
            unpooled_metrics=True, intervals=10)
    for data in tqdm(loader, total=len(loader)):
        try:
            with torch.no_grad():
                tr_pred, expand_tr_sigma, expand_batch_idx = model(data)
                loss, tr_loss, tr_base_loss = \
                    loss_fn(tr_pred, expand_tr_sigma, data=data, t_to_sigma=t_to_sigma, apply_mean=False, device=device)
                meter.add([loss.cpu().detach(), tr_loss, tr_base_loss])
                if test_sigma_intervals > 0:
                    # Map each sample's diffusion time t in [0, 1] to one of 10 buckets.
                    complex_t_tr = torch.cat([d.complex_t['tr'] for d in data])
                    sigma_index_tr = torch.round(complex_t_tr.cpu() * (10 - 1)).long()
                    expand_sigma_index_tr = torch.index_select(sigma_index_tr, dim=0, index=expand_batch_idx.cpu())
                    # NOTE(review): 4 index lists for 3 metrics -- the extra one is
                    # ignored by AverageMeter.add, which enumerates the 3 values.
                    meter_all.add(
                        [loss.cpu().detach(), tr_loss, tr_base_loss],
                        [expand_sigma_index_tr, expand_sigma_index_tr, expand_sigma_index_tr, expand_sigma_index_tr])
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                continue
            elif 'Input mismatch' in str(e):
                print('| WARNING: weird torch_cluster error, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                continue
            else:
                raise e
    out = meter.summary()
    if test_sigma_intervals > 0: out.update(meter_all.summary())
    return out
3D | kuangxh9/SuperWater | utils/so3.py | .py | 3,660 | 97 | import os
import numpy as np
import torch
from scipy.spatial.transform import Rotation
MIN_EPS, MAX_EPS, N_EPS = 0.01, 2, 1000
X_N = 2000
"""
Preprocessing for the SO(3) sampling and score computations, truncated infinite series are computed and then
cached to memory, therefore the precomputation is only run the first time the repository is run on a machine
"""
omegas = np.linspace(0, np.pi, X_N + 1)[1:]
def _compose(r1, r2): # R1 @ R2 but for Euler vecs
return Rotation.from_matrix(Rotation.from_rotvec(r1).as_matrix() @ Rotation.from_rotvec(r2).as_matrix()).as_rotvec()
def _expansion(omega, eps, L=2000): # the summation term only
p = 0
for l in range(L):
p += (2 * l + 1) * np.exp(-l * (l + 1) * eps**2) * np.sin(omega * (l + 1 / 2)) / np.sin(omega / 2)
return p
def _density(expansion, omega, marginal=True): # if marginal, density over [0, pi], else over SO(3)
if marginal:
return expansion * (1 - np.cos(omega)) / np.pi
else:
return expansion / 8 / np.pi ** 2 # the constant factor doesn't affect any actual calculations though
def _score(exp, omega, eps, L=2000): # score of density over SO(3)
dSigma = 0
for l in range(L):
hi = np.sin(omega * (l + 1 / 2))
dhi = (l + 1 / 2) * np.cos(omega * (l + 1 / 2))
lo = np.sin(omega / 2)
dlo = 1 / 2 * np.cos(omega / 2)
dSigma += (2 * l + 1) * np.exp(-l * (l + 1) * eps**2) * (lo * dhi - hi * dlo) / lo ** 2
return dSigma / exp
if os.path.exists('.so3_omegas_array2.npy'):
    # Load tables cached by a previous run.
    _omegas_array = np.load('.so3_omegas_array2.npy')
    _cdf_vals = np.load('.so3_cdf_vals2.npy')
    _score_norms = np.load('.so3_score_norms2.npy')
    _exp_score_norms = np.load('.so3_exp_score_norms2.npy')
else:
    print("Precomputing and saving to cache SO(3) distribution table")
    # eps grid is log-spaced over [MIN_EPS, MAX_EPS]; omega grid excludes 0.
    _eps_array = 10 ** np.linspace(np.log10(MIN_EPS), np.log10(MAX_EPS), N_EPS)
    _omegas_array = np.linspace(0, np.pi, X_N + 1)[1:]
    # Rows: one noise level eps; columns: the omega grid.
    _exp_vals = np.asarray([_expansion(_omegas_array, eps) for eps in _eps_array])
    _pdf_vals = np.asarray([_density(_exp, _omegas_array, marginal=True) for _exp in _exp_vals])
    # Riemann-sum CDF over [0, pi], used for inverse-CDF sampling.
    _cdf_vals = np.asarray([_pdf.cumsum() / X_N * np.pi for _pdf in _pdf_vals])
    _score_norms = np.asarray([_score(_exp_vals[i], _omegas_array, _eps_array[i]) for i in range(len(_eps_array))])
    # Root-mean-square score norm under the pdf, one value per eps.
    _exp_score_norms = np.sqrt(np.sum(_score_norms**2 * _pdf_vals, axis=1) / np.sum(_pdf_vals, axis=1) / np.pi)
    np.save('.so3_omegas_array2.npy', _omegas_array)
    np.save('.so3_cdf_vals2.npy', _cdf_vals)
    np.save('.so3_score_norms2.npy', _score_norms)
    np.save('.so3_exp_score_norms2.npy', _exp_score_norms)
def sample(eps):
    """Draw one angle omega from the IGSO(3) marginal at noise level eps (inverse CDF)."""
    frac = (np.log10(eps) - np.log10(MIN_EPS)) / (np.log10(MAX_EPS) - np.log10(MIN_EPS))
    row = np.clip(np.around(frac * N_EPS).astype(int), a_min=0, a_max=N_EPS - 1)
    u = np.random.rand()
    return np.interp(u, _cdf_vals[row], _omegas_array)
def sample_vec(eps):
    """Sample a rotation vector: uniform random axis scaled by an angle from sample(eps)."""
    axis = np.random.randn(3)
    axis /= np.linalg.norm(axis)
    return axis * sample(eps)
def score_vec(eps, vec):
    """Score (gradient of log density) at rotation vector `vec` for level eps, via table lookup."""
    frac = (np.log10(eps) - np.log10(MIN_EPS)) / (np.log10(MAX_EPS) - np.log10(MIN_EPS))
    row = np.clip(np.around(frac * N_EPS).astype(int), a_min=0, a_max=N_EPS - 1)
    om = np.linalg.norm(vec)
    # Interpolate the scalar score at |vec| and point it along vec.
    return np.interp(om, _omegas_array, _score_norms[row]) * vec / om
def score_norm(eps):
    """Expected score norm at noise level(s) eps (tensor in, float tensor out)."""
    eps = eps.numpy()
    frac = (np.log10(eps) - np.log10(MIN_EPS)) / (np.log10(MAX_EPS) - np.log10(MIN_EPS))
    rows = np.clip(np.around(frac * N_EPS).astype(int), a_min=0, a_max=N_EPS - 1)
    return torch.from_numpy(_exp_score_norms[rows]).float()
| Python |
3D | kuangxh9/SuperWater | utils/visualise.py | .py | 2,190 | 53 | from rdkit.Chem.rdmolfiles import MolToPDBBlock, MolToPDBFile
import rdkit.Chem
from rdkit import Geometry
from collections import defaultdict
import copy
import numpy as np
import torch
from rdkit import Chem
from rdkit.Chem import rdmolfiles
class PDBFile:
    """Accumulates snapshots of a molecule's coordinates and writes them out as
    a multi-MODEL PDB "trajectory" (one MODEL/ENDMDL pair per snapshot)."""
    def __init__(self, mol):
        # parts: part index -> {order -> {'block': pdb lines, 'repeat': count}}
        self.parts = defaultdict(dict)
        self.mol = copy.deepcopy(mol)
        # Drop every conformer except conformer 0 (`if j` skips index 0).
        [self.mol.RemoveConformer(j) for j in range(mol.GetNumConformers()) if j]
    def add(self, coords, order, part=0, repeat=1):
        """Register one snapshot: either a full RDKit mol or an (n_atoms, 3) coordinate array/tensor."""
        if type(coords) in [rdkit.Chem.Mol, rdkit.Chem.RWMol]:
            # [:-2] strips the trailing END record and empty line.
            block = MolToPDBBlock(coords).split('\n')[:-2]
            self.parts[part][order] = {'block': block, 'repeat': repeat}
            return
        elif type(coords) is np.ndarray:
            coords = coords.astype(np.float64)
        elif type(coords) is torch.Tensor:
            coords = coords.double().numpy()
        # Write the coordinates into conformer 0 of the stored molecule, then serialise.
        for i in range(coords.shape[0]):
            self.mol.GetConformer(0).SetAtomPosition(i, Geometry.Point3D(coords[i, 0], coords[i, 1], coords[i, 2]))
        block = MolToPDBBlock(self.mol).split('\n')[:-2]
        self.parts[part][order] = {'block': block, 'repeat': repeat}
    def write(self, path=None, limit_parts=None):
        """Serialise all registered snapshots; returns the string when `path` is falsy."""
        is_first = True
        str_ = ''
        for part in sorted(self.parts.keys()):
            if limit_parts and part >= limit_parts:
                break
            part = self.parts[part]
            # Non-negative orders first (ascending), then negative orders.
            keys_positive = sorted(filter(lambda x: x >= 0, part.keys()))
            keys_negative = sorted(filter(lambda x: x < 0, part.keys()))
            keys = list(keys_positive) + list(keys_negative)
            for key in keys:
                block = part[key]['block']
                times = part[key]['repeat']
                for _ in range(times):
                    # Only the first MODEL keeps CONECT records (bond topology).
                    if not is_first:
                        block = [line for line in block if 'CONECT' not in line]
                    is_first = False
                    str_ += 'MODEL\n'
                    str_ += '\n'.join(block)
                    str_ += '\nENDMDL\n'
        if not path:
            return str_
        with open(path, 'w') as f:
            f.write(str_)
3D | kuangxh9/SuperWater | utils/diffusion_utils.py | .py | 2,797 | 74 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from scipy.stats import beta
from utils.geometry import axis_angle_to_matrix, rigid_transform_Kabsch_3D_torch
from utils.torsion import modify_conformer_torsion_angles
def t_to_sigma(t_tr, args):
    """Geometric interpolation of the translation noise level:
    args.tr_sigma_min at t=0 up to args.tr_sigma_max at t=1."""
    lo, hi = args.tr_sigma_min, args.tr_sigma_max
    return lo ** (1 - t_tr) * hi ** t_tr
def modify_conformer(data, tr_update):
    """Translate the ligand point cloud by `tr_update` (in place) and return `data`."""
    shifted = data['ligand'].pos + tr_update
    data['ligand'].pos = shifted
    return data
def sinusoidal_embedding(timesteps, embedding_dim, max_positions=10000):
    """Transformer-style sinusoidal timestep embedding.
    Adapted from https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py

    Returns a (len(timesteps), embedding_dim) tensor; odd dims are zero-padded.
    """
    assert len(timesteps.shape) == 1
    half_dim = embedding_dim // 2
    # Geometric frequency ladder from 1 down to 1/max_positions.
    log_step = math.log(max_positions) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -log_step)
    angles = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad the final column for odd dims
        emb = F.pad(emb, (0, 1), mode='constant')
    assert emb.shape == (timesteps.shape[0], embedding_dim)
    return emb
class GaussianFourierProjection(nn.Module):
    """Gaussian Fourier embeddings for noise levels.
    from https://github.com/yang-song/score_sde_pytorch/blob/1618ddea340f3e4a2ed7852a0694a809775cf8d0/models/layerspp.py#L32

    A fixed random projection (frozen parameter, saved with the model but
    never trained) followed by sin/cos features.
    """

    def __init__(self, embedding_size=256, scale=1.0):
        super().__init__()
        frozen = torch.randn(embedding_size // 2) * scale
        self.W = nn.Parameter(frozen, requires_grad=False)

    def forward(self, x):
        proj = x[:, None] * self.W[None, :] * 2 * np.pi
        return torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)
def get_timestep_embedding(embedding_type, embedding_dim, embedding_scale=10000):
    """Factory for timestep-embedding callables.

    'sinusoidal' -> a lambda wrapping sinusoidal_embedding (scale multiplies t);
    'fourier'    -> a GaussianFourierProjection module.

    Raises:
        NotImplementedError: for unknown `embedding_type` values.
        (Fix: the original `raise NotImplemented` raised a TypeError, because
        NotImplemented is a constant, not an exception class.)
    """
    if embedding_type == 'sinusoidal':
        emb_func = (lambda x: sinusoidal_embedding(embedding_scale * x, embedding_dim))
    elif embedding_type == 'fourier':
        emb_func = GaussianFourierProjection(embedding_size=embedding_dim, scale=embedding_scale)
    else:
        raise NotImplementedError(f"Unknown embedding type: {embedding_type}")
    return emb_func
def get_t_schedule(inference_steps):
    """Diffusion times for inference: `inference_steps` values linearly
    spaced from 1 down to, but excluding, 0."""
    full_grid = np.linspace(1, 0, inference_steps + 1)
    return full_grid[:-1]
def set_time(complex_graphs, t_tr, batchsize, all_atoms, device):
    """Stamp the diffusion time `t_tr` onto every node type of the batched
    complex graph (and onto the complex itself, one value per graph)."""
    def _stamp(count):
        # Constant-filled time vector on the target device.
        return t_tr * torch.ones(count).to(device)
    complex_graphs['ligand'].node_t = {'tr': _stamp(complex_graphs['ligand'].num_nodes)}
    complex_graphs['receptor'].node_t = {'tr': _stamp(complex_graphs['receptor'].num_nodes)}
    complex_graphs.complex_t = {'tr': _stamp(batchsize)}
    if all_atoms:
        complex_graphs['atom'].node_t = {'tr': _stamp(complex_graphs['atom'].num_nodes)}
3D | kuangxh9/SuperWater | utils/geometry.py | .py | 4,021 | 124 | import math
import torch
def quaternion_to_matrix(quaternions):
    """
    From https://pytorch3d.readthedocs.io/en/latest/_modules/pytorch3d/transforms/rotation_conversions.html
    Convert rotations given as quaternions (real part first, shape (..., 4))
    to rotation matrices of shape (..., 3, 3).
    """
    w, x, y, z = torch.unbind(quaternions, -1)
    # 2 / |q|^2 -- makes the formula exact for non-unit quaternions too.
    s = 2.0 / (quaternions * quaternions).sum(-1)
    flat = torch.stack(
        (
            1 - s * (y * y + z * z),
            s * (x * y - z * w),
            s * (x * z + y * w),
            s * (x * y + z * w),
            1 - s * (x * x + z * z),
            s * (y * z - x * w),
            s * (x * z - y * w),
            s * (y * z + x * w),
            1 - s * (x * x + y * y),
        ),
        -1,
    )
    return flat.reshape(quaternions.shape[:-1] + (3, 3))
def axis_angle_to_quaternion(axis_angle):
    """
    Convert axis/angle rotations (shape (..., 3); magnitude is the angle in
    radians, anticlockwise about the vector direction) to quaternions with
    real part first, shape (..., 4).

    Adapted from pytorch3d.transforms.rotation_conversions.
    """
    angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
    half = 0.5 * angles
    # sin(x/2)/x, with a Taylor expansion near zero for numerical stability:
    # sin(x/2)/x ~= 1/2 - x^2/48
    ratio = torch.empty_like(angles)
    tiny = angles.abs() < 1e-6
    ratio[~tiny] = torch.sin(half[~tiny]) / angles[~tiny]
    ratio[tiny] = 0.5 - (angles[tiny] * angles[tiny]) / 48
    return torch.cat([torch.cos(half), axis_angle * ratio], dim=-1)
def axis_angle_to_matrix(axis_angle):
    """
    Convert axis/angle rotations (shape (..., 3); magnitude is the angle in
    radians, anticlockwise about the vector direction) to rotation matrices
    of shape (..., 3, 3).

    Adapted from pytorch3d.transforms.rotation_conversions; composes the two
    conversions defined above.
    """
    quaternions = axis_angle_to_quaternion(axis_angle)
    return quaternion_to_matrix(quaternions)
def rigid_transform_Kabsch_3D_torch(A, B):
    """Kabsch alignment: find the rigid transform mapping A onto B.

    Args:
        A, B: 3xN matrices of corresponding points (same column count; the
            column order already encodes the residue correspondence).

    Returns:
        (R, t): 3x3 rotation matrix and 3x1 translation column vector
        minimising ||R @ A + t - B|| in the least-squares sense.
    """
    assert A.shape[1] == B.shape[1]
    for name, mat in (("A", A), ("B", B)):
        num_rows, num_cols = mat.shape
        if num_rows != 3:
            raise Exception(f"matrix {name} is not 3xN, it is {num_rows}x{num_cols}")

    # centre both point clouds (3 x 1 column-wise means)
    centroid_A = torch.mean(A, axis=1, keepdims=True)
    centroid_B = torch.mean(B, axis=1, keepdims=True)
    H = (A - centroid_A) @ (B - centroid_B).T

    # rotation from the SVD of the cross-covariance
    U, S, Vt = torch.linalg.svd(H)
    R = Vt.T @ U.T
    if torch.linalg.det(R) < 0:
        # reflection detected: flip the last singular direction
        flip = torch.diag(torch.tensor([1., 1., -1.], device=A.device))
        R = (Vt.T @ flip) @ U.T
    # loose tolerance, matching the original implementation
    assert math.fabs(torch.linalg.det(R) - 1) < 3e-3

    t = -R @ centroid_A + centroid_B
    return R, t
| Python |
3D | kuangxh9/SuperWater | utils/find_water_pos.py | .py | 1,267 | 37 | import numpy as np
import warnings
from Bio.PDB import PDBParser
from openbabel import openbabel as ob
def find_real_water_pos(file_path, model_index=0):
    """Collect oxygen-atom coordinates from a PDB or MOL2 file.

    Args:
        file_path: path to a ``.pdb`` or ``.mol2`` file.
        model_index: index of the model to read from a multi-model PDB.
            (Bug fix: this parameter was previously ignored and the first
            model was always used; the default of 0 preserves old behaviour.)

    Returns:
        numpy array of shape (n, 3), one row per oxygen atom.

    Raises:
        ValueError: if the file extension is neither .pdb nor .mol2.
    """
    file_extension = file_path.split('.')[-1].lower()
    if file_extension == 'pdb':
        warnings.simplefilter('ignore')
        parser = PDBParser(QUIET=True)
        structure = parser.get_structure('PDB_structure', file_path)
        water_positions = []
        # honour model_index instead of always taking the first model
        models = list(structure.get_models())
        selected_model = models[model_index]
        for chain in selected_model:
            for residue in chain:
                for atom in residue:
                    # NOTE(review): every oxygen atom is collected, not only HOH
                    # residues — confirm callers pass water-only structures.
                    if atom.element == 'O':
                        water_positions.append(atom.coord)
    elif file_extension == 'mol2':
        obConversion = ob.OBConversion()
        obConversion.SetInFormat("mol2")
        mol = ob.OBMol()
        obConversion.ReadFile(mol, file_path)
        water_positions = []
        for atom in ob.OBMolAtomIter(mol):
            if atom.GetType() == 'O':
                water_positions.append(np.array([atom.GetX(), atom.GetY(), atom.GetZ()]))
    else:
        raise ValueError("Unsupported file format. Please provide a PDB or MOL2 file.")
    return np.array(water_positions)
3D | kuangxh9/SuperWater | utils/nearest_point_dist.py | .py | 325 | 9 | import torch
def get_nearest_point_distances(set1, set2):
    """For each point in set1, the distance to — and index of — its nearest
    neighbour in set2.

    Returns:
        (distances, indices): numpy array of minimum distances and a torch
        tensor of the matching set2 indices.
    """
    source = torch.tensor(set1, dtype=torch.float)
    target = torch.tensor(set2, dtype=torch.float)
    pairwise = torch.cdist(source, target)
    nearest, nearest_idx = pairwise.min(dim=1)
    return nearest.numpy(), nearest_idx
3D | kuangxh9/SuperWater | utils/inference_utils.py | .py | 10,690 | 274 | import os
import torch
from Bio.PDB import PDBParser
from esm import FastaBatchedDataset, pretrained
from rdkit.Chem import AddHs, MolFromSmiles
from torch_geometric.data import Dataset, HeteroData
import esm
from datasets.process_mols import parse_pdb_from_path, generate_conformer, read_molecule, get_lig_graph_with_matching, \
extract_receptor_structure, get_rec_graph
# Mapping from 3-letter residue names to 1-letter amino-acid codes; includes
# the ambiguity codes (ASX/GLX/XAA/XLE) and MSE (selenomethionine, treated as MET).
three_to_one = {'ALA': 'A',
                'ARG': 'R',
                'ASN': 'N',
                'ASP': 'D',
                'CYS': 'C',
                'GLN': 'Q',
                'GLU': 'E',
                'GLY': 'G',
                'HIS': 'H',
                'ILE': 'I',
                'LEU': 'L',
                'LYS': 'K',
                'MET': 'M',
                'MSE': 'M',  # MSE this is almost the same AA as MET. The sulfur is just replaced by Selen
                'PHE': 'F',
                'PRO': 'P',
                'PYL': 'O',
                'SER': 'S',
                'SEC': 'U',
                'THR': 'T',
                'TRP': 'W',
                'TYR': 'Y',
                'VAL': 'V',
                'ASX': 'B',
                'GLX': 'Z',
                'XAA': 'X',
                'XLE': 'J'}
def get_sequences_from_pdbfile(file_path):
    """Extract the amino-acid sequence(s) from a PDB file.

    Chain sequences are concatenated with ':' separators. Water residues (HOH)
    and residues missing any backbone atom (N, CA, C) are skipped; residue
    names not in `three_to_one` become '-'.

    Args:
        file_path: path to the PDB file.

    Returns:
        The combined sequence string, or None if the model has no chains.
    """
    biopython_parser = PDBParser()
    structure = biopython_parser.get_structure('random_id', file_path)
    structure = structure[0]
    sequence = None
    for i, chain in enumerate(structure):
        seq = ''
        for res_idx, residue in enumerate(chain):
            if residue.get_resname() == 'HOH':
                continue
            c_alpha, n, c = None, None, None
            for atom in residue:
                if atom.name == 'CA':
                    c_alpha = list(atom.get_vector())
                if atom.name == 'N':
                    n = list(atom.get_vector())
                if atom.name == 'C':
                    c = list(atom.get_vector())
            # only treat the residue as an amino acid if the full backbone is present
            # (identity comparison with None, per PEP 8, instead of `!= None`)
            if c_alpha is not None and n is not None and c is not None:
                try:
                    seq += three_to_one[residue.get_resname()]
                except KeyError:
                    # narrowed from a blanket `except Exception` — only the dict
                    # lookup can fail here
                    seq += '-'
                    print("encountered unknown AA: ", residue.get_resname(), ' in the complex. Replacing it with a dash - .')
        if sequence is None:
            sequence = seq
        else:
            sequence += (":" + seq)
    return sequence
def set_nones(l):
    """Replace NaN-like entries (anything whose str() is 'nan') with None."""
    cleaned = []
    for item in l:
        cleaned.append(None if str(item) == 'nan' else item)
    return cleaned
def get_sequences(protein_files, protein_sequences):
    """Resolve one sequence per entry: parse the PDB file when given, otherwise
    fall back to the corresponding provided sequence string."""
    resolved = []
    for idx, pdb_path in enumerate(protein_files):
        if pdb_path is not None:
            resolved.append(get_sequences_from_pdbfile(pdb_path))
        else:
            resolved.append(protein_sequences[idx])
    return resolved
def compute_ESM_embeddings(model, alphabet, labels, sequences):
    """Compute per-token ESM2 embeddings for a batch of protein sequences.

    Args:
        model: a pretrained ESM model (layer-33 representations are extracted,
            i.e. this is tailored to the 33-layer esm2_t33_650M checkpoint).
        alphabet: the matching ESM alphabet (provides the batch converter).
        labels: one identifier per sequence; used as keys of the result.
        sequences: protein sequences as strings.

    Returns:
        dict mapping each label to a CPU tensor of per-residue embeddings,
        truncated to at most 1022 residues (BOS token stripped via `1:`).
    """
    # settings used
    toks_per_batch = 4096
    repr_layers = [33]
    include = "per_tok"  # NOTE(review): unused — kept from the upstream ESM extraction script
    truncation_seq_length = 1022
    dataset = FastaBatchedDataset(labels, sequences)
    batches = dataset.get_batch_indices(toks_per_batch, extra_toks_per_seq=1)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=alphabet.get_batch_converter(truncation_seq_length), batch_sampler=batches
    )
    assert all(-(model.num_layers + 1) <= i <= model.num_layers for i in repr_layers)
    # normalise possibly-negative layer indices to [0, num_layers]
    repr_layers = [(i + model.num_layers + 1) % (model.num_layers + 1) for i in repr_layers]
    embeddings = {}
    with torch.no_grad():
        # NOTE: the loop variable `labels` shadows the function argument of the
        # same name — per-batch labels come from the dataset, so this is benign.
        for batch_idx, (labels, strs, toks) in enumerate(data_loader):
            print(f"Processing {batch_idx + 1} of {len(batches)} batches ({toks.size(0)} sequences)")
            if torch.cuda.is_available():
                toks = toks.to(device="cuda", non_blocking=True)
            out = model(toks, repr_layers=repr_layers, return_contacts=False)
            representations = {layer: t.to(device="cpu") for layer, t in out["representations"].items()}
            for i, label in enumerate(labels):
                # drop the BOS token at position 0; keep at most truncation_seq_length residues
                truncate_len = min(truncation_seq_length, len(strs[i]))
                embeddings[label] = representations[33][i, 1: truncate_len + 1].clone()
    return embeddings
def generate_ESM_structure(model, filename, sequence):
    """Fold a sequence with ESMFold and write the predicted PDB to `filename`.

    On CUDA out-of-memory errors the chunk size is halved and inference is
    retried, down to a chunk size of 2; other RuntimeErrors are re-raised.

    Returns:
        True if a structure was produced and saved, False if memory ran out.
    """
    model.set_chunk_size(256)
    chunk_size = 256
    output = None
    while output is None:
        try:
            with torch.no_grad():
                output = model.infer_pdb(sequence)
            with open(filename, "w") as f:
                f.write(output)
                print("saved", filename)
        except RuntimeError as e:
            # ESMFold signals OOM via a RuntimeError whose message contains this text
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory on chunk_size', chunk_size)
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                chunk_size = chunk_size // 2
                if chunk_size > 2:
                    model.set_chunk_size(chunk_size)
                else:
                    print("Not enough memory for ESMFold")
                    break
            else:
                raise e
    return output is not None
class InferenceDataset(Dataset):
    """Dataset for inference: builds one HeteroData graph per protein/ligand pair.

    At construction time this optionally (1) computes ESM2 language-model
    embeddings for each protein chain and (2) folds entries that have no
    structure file with ESMFold (requires CUDA). Both are heavy, import-time
    side effects of __init__.
    """
    def __init__(self, out_dir, complex_names, protein_files, ligand_descriptions, protein_sequences, lm_embeddings,
                 receptor_radius=30, c_alpha_max_neighbors=None, precomputed_lm_embeddings=None,
                 remove_hs=False, all_atoms=False, atom_radius=5, atom_max_neighbors=None):
        super(InferenceDataset, self).__init__()
        self.receptor_radius = receptor_radius
        self.c_alpha_max_neighbors = c_alpha_max_neighbors
        self.remove_hs = remove_hs
        self.all_atoms = all_atoms
        self.atom_radius, self.atom_max_neighbors = atom_radius, atom_max_neighbors
        self.complex_names = complex_names
        self.protein_files = protein_files
        self.ligand_descriptions = ligand_descriptions
        self.protein_sequences = protein_sequences
        # generate LM embeddings
        if lm_embeddings and (precomputed_lm_embeddings is None or precomputed_lm_embeddings[0] is None):
            print("Generating ESM language model embeddings")
            model_location = "esm2_t33_650M_UR50D"
            model, alphabet = pretrained.load_model_and_alphabet(model_location)
            model.eval()
            if torch.cuda.is_available():
                model = model.cuda()
            # fill sequences from PDB files where available
            protein_sequences = get_sequences(protein_files, protein_sequences)
            # one label per chain: "<complex>_chain_<j>" (chains split on ':')
            labels, sequences = [], []
            for i in range(len(protein_sequences)):
                s = protein_sequences[i].split(':')
                sequences.extend(s)
                labels.extend([complex_names[i] + '_chain_' + str(j) for j in range(len(s))])
            lm_embeddings = compute_ESM_embeddings(model, alphabet, labels, sequences)
            # regroup the flat per-chain embeddings back into one list per complex
            self.lm_embeddings = []
            for i in range(len(protein_sequences)):
                s = protein_sequences[i].split(':')
                self.lm_embeddings.append([lm_embeddings[f'{complex_names[i]}_chain_{j}'] for j in range(len(s))])
        elif not lm_embeddings:
            self.lm_embeddings = [None] * len(self.complex_names)
        else:
            self.lm_embeddings = precomputed_lm_embeddings
        # generate structures with ESMFold
        if None in protein_files:
            print("generating missing structures with ESMFold")
            model = esm.pretrained.esmfold_v1()
            # NOTE(review): unconditionally moves to CUDA — ESMFold here requires a GPU
            model = model.eval().cuda()
            for i in range(len(protein_files)):
                if protein_files[i] is None:
                    self.protein_files[i] = f"{out_dir}/{complex_names[i]}/{complex_names[i]}_esmfold.pdb"
                    if not os.path.exists(self.protein_files[i]):
                        print("generating", self.protein_files[i])
                        generate_ESM_structure(model, self.protein_files[i], protein_sequences[i])
    def len(self):
        """Number of complexes (torch_geometric Dataset API)."""
        return len(self.complex_names)
    def get(self, idx):
        """Build the HeteroData graph for complex `idx`.

        On any parsing/graph-construction failure the returned graph has
        complex_graph['success'] == False instead of raising.
        """
        name, protein_file, ligand_description, lm_embedding = \
            self.complex_names[idx], self.protein_files[idx], self.ligand_descriptions[idx], self.lm_embeddings[idx]
        complex_graph = HeteroData()
        complex_graph['name'] = name
        try:
            # ligand_description may be a SMILES string or a molecule file path
            mol = MolFromSmiles(ligand_description)
            if mol is not None:
                mol = AddHs(mol)
                generate_conformer(mol)
            else:
                mol = read_molecule(ligand_description, remove_hs=False, sanitize=True)
                if mol is None:
                    raise Exception('RDKit could not read the molecule ', ligand_description)
                # discard file conformers and regenerate one with hydrogens
                mol.RemoveAllConformers()
                mol = AddHs(mol)
                generate_conformer(mol)
        except Exception as e:
            print('Failed to read molecule ', ligand_description, ' We are skipping it. The reason is the exception: ', e)
            complex_graph['success'] = False
            return complex_graph
        try:
            # parse the receptor from the pdb file
            rec_model = parse_pdb_from_path(protein_file)
            get_lig_graph_with_matching(mol, complex_graph, popsize=None, maxiter=None, matching=False, keep_original=False,
                                        num_conformers=1, remove_hs=self.remove_hs)
            rec, rec_coords, c_alpha_coords, n_coords, c_coords, lm_embeddings = extract_receptor_structure(rec_model, mol, lm_embedding_chains=lm_embedding)
            if lm_embeddings is not None and len(c_alpha_coords) != len(lm_embeddings):
                print(f'LM embeddings for complex {name} did not have the right length for the protein. Skipping {name}.')
                complex_graph['success'] = False
                return complex_graph
            get_rec_graph(rec, rec_coords, c_alpha_coords, n_coords, c_coords, complex_graph, rec_radius=self.receptor_radius,
                          c_alpha_max_neighbors=self.c_alpha_max_neighbors, all_atoms=self.all_atoms,
                          atom_radius=self.atom_radius, atom_max_neighbors=self.atom_max_neighbors, remove_hs=self.remove_hs, lm_embeddings=lm_embeddings)
        except Exception as e:
            print(f'Skipping {name} because of the error:')
            print(e)
            complex_graph['success'] = False
            return complex_graph
        # centre receptor (and atoms) on the protein mean, ligand on its own mean
        protein_center = torch.mean(complex_graph['receptor'].pos, dim=0, keepdim=True)
        complex_graph['receptor'].pos -= protein_center
        if self.all_atoms:
            complex_graph['atom'].pos -= protein_center
        ligand_center = torch.mean(complex_graph['ligand'].pos, dim=0, keepdim=True)
        complex_graph['ligand'].pos -= ligand_center
        complex_graph.original_center = protein_center
        complex_graph.mol = mol
        complex_graph['success'] = True
        return complex_graph
| Python |
3D | kuangxh9/SuperWater | utils/cluster_centroid.py | .py | 2,794 | 81 | import numpy as np
from scipy.spatial import distance_matrix, cKDTree
from scipy.spatial.distance import pdist
def find_centroids(pred_coords, coords_prob, threshold=0.5,
                   cluster_distance=1.52, use_weighted_avg=True, clash_distance=2.2,
                   dedupe_decimals=6, tol=1e-8):
    """
    Returns centroids after clustering, weighted/best selection, and clash removal.
    Clash removal uses global greedy non-maximum suppression (NMS) with prob sorting,
    ensuring minimum pairwise distance >= clash_distance - tol.
    """
    # --- stage 1: threshold by probability ---
    valid_indices = np.where(coords_prob >= threshold)[0]
    refined_coords = pred_coords[valid_indices]
    refined_probs = coords_prob[valid_indices]
    if refined_coords.size == 0:
        return None
    # --- stage 2: greedy clustering around local probability maxima ---
    dist_mat = distance_matrix(refined_coords, refined_coords)
    clusters = []
    visited = set()
    for i in range(len(refined_coords)):
        if i in visited:
            continue
        neighbors = np.where((dist_mat[i] < cluster_distance) & (dist_mat[i] > 0))[0]
        # skip i unless it is the highest-probability point in its neighbourhood
        if neighbors.size > 0 and refined_probs[i] < np.max(refined_probs[neighbors]):
            continue
        # NOTE(review): the cluster leader i is never added to `visited`; it is
        # not revisited by this loop, but could in principle also be claimed as
        # a neighbour of a later leader — confirm this is intended.
        cluster = [i]
        for nb in neighbors:
            if nb not in visited:
                cluster.append(nb)
                visited.add(nb)
        clusters.append(cluster)
    # --- stage 3: one centroid per cluster (prob-weighted mean or best point) ---
    finals = []
    final_probs = []
    for cl in clusters:
        cc = refined_coords[cl]
        pp = refined_probs[cl]
        best_idx = np.argmax(pp)
        best_prob = pp[best_idx]
        if use_weighted_avg and len(cl) > 1:
            w = pp / (pp.sum() + 1e-12)
            centroid = np.average(cc, axis=0, weights=w)
        else:
            centroid = cc[best_idx]
        finals.append(centroid)
        final_probs.append(best_prob)
    if len(finals) == 0:
        return None
    final_centroids = np.asarray(finals, dtype=float)
    final_probs = np.asarray(final_probs, dtype=float)
    # --- stage 4: greedy NMS by probability, blocking anything within clash_distance ---
    tree = cKDTree(final_centroids)
    neighbor_lists = tree.query_ball_point(final_centroids, r=clash_distance - tol)
    order = np.argsort(-final_probs)
    blocked = np.zeros(len(final_centroids), dtype=bool)
    keep_idx = []
    for idx in order:
        if blocked[idx]:
            continue
        keep_idx.append(idx)
        # blocks idx itself too (each point is in its own neighbour list)
        for nb in neighbor_lists[idx]:
            blocked[nb] = True
    final_centroids = final_centroids[keep_idx]
    final_probs = final_probs[keep_idx]
    # --- stage 5: exact de-duplication (order-preserving) ---
    if final_centroids.shape[0] > 1:
        rounded = np.round(final_centroids, dedupe_decimals)
        uniq, uniq_idx = np.unique(rounded, axis=0, return_index=True)
        order2 = np.argsort(uniq_idx)
        final_centroids = final_centroids[uniq_idx[order2]]
        # final_probs = final_probs[uniq_idx[order2]]
    return final_centroids
| Python |
3D | kuangxh9/SuperWater | utils/min_dist.py | .py | 379 | 12 | import torch
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
import numpy as np
def match_points_and_get_distances(data1, data2):
    """Distances of the optimal one-to-one (Hungarian) matching between two
    point sets, one distance per matched pair."""
    cost = cdist(data1, data2)
    rows, cols = linear_sum_assignment(cost)
    return np.array([cost[r, c] for r, c in zip(rows, cols)])
| Python |
3D | kuangxh9/SuperWater | utils/seed.py | .py | 5,511 | 132 | import logging
import os
import random
from random import getstate as python_get_rng_state
from random import setstate as python_set_rng_state
from typing import Any, Dict, Optional
import numpy as np
import torch
from lightning_fabric.utilities.rank_zero import _get_rank, rank_prefixed_message, rank_zero_only, rank_zero_warn
# module-level logger for seed-related messages
log = logging.getLogger(__name__)
# numpy's RNG accepts seeds in the unsigned 32-bit integer range
max_seed_value = np.iinfo(np.uint32).max
min_seed_value = np.iinfo(np.uint32).min
def seed_everything(seed: Optional[int] = None, workers: bool = False) -> int:
    r"""Function that sets seed for pseudo-random number generators in: pytorch, numpy, python.random In addition,
    sets the following environment variables:
    - ``PL_GLOBAL_SEED``: will be passed to spawned subprocesses (e.g. ddp_spawn backend).
    - ``PL_SEED_WORKERS``: (optional) is set to 1 if ``workers=True``.
    Args:
        seed: the integer value seed for global random state in Lightning.
            If ``None``, will read seed from ``PL_GLOBAL_SEED`` env variable
            or select it randomly.
        workers: if set to ``True``, will properly configure all dataloaders passed to the
            Trainer with a ``worker_init_fn``. If the user already provides such a function
            for their dataloaders, setting this argument will have no influence. See also:
            :func:`~lightning_fabric.utilities.seed.pl_worker_init_function`.
    """
    # resolve the seed: argument > PL_GLOBAL_SEED env var > random fallback
    if seed is None:
        env_seed = os.environ.get("PL_GLOBAL_SEED")
        if env_seed is None:
            seed = _select_seed_randomly(min_seed_value, max_seed_value)
            rank_zero_warn(f"No seed found, seed set to {seed}")
        else:
            try:
                seed = int(env_seed)
            except ValueError:
                seed = _select_seed_randomly(min_seed_value, max_seed_value)
                rank_zero_warn(f"Invalid seed found: {repr(env_seed)}, seed set to {seed}")
    elif not isinstance(seed, int):
        seed = int(seed)
    # out-of-range seeds are replaced (not clamped) by a random in-range one
    if not (min_seed_value <= seed <= max_seed_value):
        rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
        seed = _select_seed_randomly(min_seed_value, max_seed_value)
    log.info(rank_prefixed_message(f"Seed set to {seed}", _get_rank()))
    # export so spawned subprocesses inherit the same seed
    os.environ["PL_GLOBAL_SEED"] = str(seed)
    # seed every process-global RNG: python, numpy, torch CPU and all CUDA devices
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ["PL_SEED_WORKERS"] = f"{int(workers)}"
    return seed
def _select_seed_randomly(min_seed_value: int = min_seed_value, max_seed_value: int = max_seed_value) -> int:
    # Non-cryptographic randomness is intentional here: the seed only drives
    # reproducible experiment RNGs, nothing security-sensitive.
    return random.randint(min_seed_value, max_seed_value)  # noqa: S311
def reset_seed() -> None:
    r"""Re-apply the seed previously installed by
    :func:`~lightning_fabric.utilities.seed.seed_everything` (read back from the
    ``PL_GLOBAL_SEED`` / ``PL_SEED_WORKERS`` environment variables).

    A no-op when :func:`seed_everything` has never been called.
    """
    stored_seed = os.environ.get("PL_GLOBAL_SEED", None)
    if stored_seed is None:
        return
    stored_workers = os.environ.get("PL_SEED_WORKERS", "0")
    seed_everything(int(stored_seed), workers=bool(int(stored_workers)))
def pl_worker_init_function(worker_id: int, rank: Optional[int] = None) -> None:  # pragma: no cover
    r"""The worker_init_fn that Lightning automatically adds to your dataloader if you previously set the seed with
    ``seed_everything(seed, workers=True)``.
    See also the PyTorch documentation on
    `randomness in DataLoaders <https://pytorch.org/docs/stable/notes/randomness.html#dataloader>`_.
    """
    # implementation notes: https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562
    global_rank = rank if rank is not None else rank_zero_only.rank
    process_seed = torch.initial_seed()
    # back out the base seed so we can use all the bits
    base_seed = process_seed - worker_id
    log.debug(
        f"Initializing random number generators of process {global_rank} worker {worker_id} with base seed {base_seed}"
    )
    # (base_seed, worker_id, global_rank) makes the stream unique per worker AND per rank
    ss = np.random.SeedSequence([base_seed, worker_id, global_rank])
    # use 128 bits (4 x 32-bit words)
    np.random.seed(ss.generate_state(4))
    # Spawn distinct SeedSequences for the PyTorch PRNG and the stdlib random module
    torch_ss, stdlib_ss = ss.spawn(2)
    torch.manual_seed(torch_ss.generate_state(1, dtype=np.uint64)[0])
    # use 128 bits expressed as an integer
    stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum()
    random.seed(stdlib_seed)
def _collect_rng_states(include_cuda: bool = True) -> Dict[str, Any]:
r"""Collect the global random state of :mod:`torch`, :mod:`torch.cuda`, :mod:`numpy` and Python."""
states = {
"torch": torch.get_rng_state(),
"numpy": np.random.get_state(),
"python": python_get_rng_state(),
}
if include_cuda:
states["torch.cuda"] = torch.cuda.get_rng_state_all()
return states
def _set_rng_states(rng_state_dict: Dict[str, Any]) -> None:
r"""Set the global random state of :mod:`torch`, :mod:`torch.cuda`, :mod:`numpy` and Python in the current
process."""
torch.set_rng_state(rng_state_dict["torch"])
# torch.cuda rng_state is only included since v1.8.
if "torch.cuda" in rng_state_dict:
torch.cuda.set_rng_state_all(rng_state_dict["torch.cuda"])
np.random.set_state(rng_state_dict["numpy"])
version, state, gauss = rng_state_dict["python"]
python_set_rng_state((version, tuple(state), gauss))
| Python |
3D | kuangxh9/SuperWater | utils/torus.py | .py | 2,609 | 84 | import numpy as np
import tqdm
import os
"""
Preprocessing for the SO(2)/torus sampling and score computations, truncated infinite series are computed and then
cached to memory, therefore the precomputation is only run the first time the repository is run on a machine
"""
def p(x, sigma, N=10):
    """Unnormalised wrapped-normal density on the circle: sum of the Gaussian
    images of x shifted by 2*pi*i, truncated to |i| <= N.

    Only used to precompute the cached tables below; shadowed later in this
    module by the table-lookup ``p(x, sigma)``.
    """
    p_ = 0
    for i in tqdm.trange(-N, N + 1):
        p_ += np.exp(-(x + 2 * np.pi * i) ** 2 / 2 / sigma ** 2)
    return p_
def grad(x, sigma, N=10):
    """Accumulate (x + 2*pi*i)/sigma^2 times the Gaussian-image terms of `p`,
    truncated to |i| <= N. Divided by `p` below to build the cached score
    table (the sign convention is applied later, in `score`)."""
    p_ = 0
    for i in tqdm.trange(-N, N + 1):
        p_ += (x + 2 * np.pi * i) / sigma ** 2 * np.exp(-(x + 2 * np.pi * i) ** 2 / 2 / sigma ** 2)
    return p_
# log-spaced grid resolutions and bounds for the lookup tables
X_MIN, X_N = 1e-5, 5000  # relative to pi
SIGMA_MIN, SIGMA_MAX, SIGMA_N = 3e-3, 2, 5000  # relative to pi
# evaluation grids: x in (X_MIN*pi, pi], sigma in [SIGMA_MIN*pi, SIGMA_MAX*pi]
x = 10 ** np.linspace(np.log10(X_MIN), 0, X_N + 1) * np.pi
sigma = 10 ** np.linspace(np.log10(SIGMA_MIN), np.log10(SIGMA_MAX), SIGMA_N + 1) * np.pi
# import-time side effect: load the cached density/score tables from the current
# working directory, or precompute them (slow) on the first run on a machine
if os.path.exists('.p.npy'):
    p_ = np.load('.p.npy')
    score_ = np.load('.score.npy')
else:
    print("Precomputing and saving to cache torus distribution table")
    p_ = p(x, sigma[:, None], N=100)
    np.save('.p.npy', p_)
    score_ = grad(x, sigma[:, None], N=100) / p_
    np.save('.score.npy', score_)
def score(x, sigma):
    """Look up the wrapped-normal score at (x, sigma) from the precomputed
    table, using nearest-bin indexing on the log-spaced |x|/pi and sigma/pi
    grids; the sign of x is applied on top of the table's convention."""
    x = (x + np.pi) % (2 * np.pi) - np.pi  # wrap x into [-pi, pi)
    sign = np.sign(x)
    # map |x| to its nearest log-spaced bin index, clipped to the table range
    x = np.log(np.abs(x) / np.pi)
    x = (x - np.log(X_MIN)) / (0 - np.log(X_MIN)) * X_N
    x = np.round(np.clip(x, 0, X_N)).astype(int)
    # same log-spaced binning for sigma
    sigma = np.log(sigma / np.pi)
    sigma = (sigma - np.log(SIGMA_MIN)) / (np.log(SIGMA_MAX) - np.log(SIGMA_MIN)) * SIGMA_N
    sigma = np.round(np.clip(sigma, 0, SIGMA_N)).astype(int)
    return -sign * score_[sigma, x]
def p(x, sigma):
    """Table lookup of the precomputed wrapped-normal density (shadows the
    series-sum `p` defined above). x is wrapped to [-pi, pi); both x and
    sigma are mapped to their nearest log-spaced bins, clipped to the grid."""
    x = (x + np.pi) % (2 * np.pi) - np.pi
    x = np.log(np.abs(x) / np.pi)
    x = (x - np.log(X_MIN)) / (0 - np.log(X_MIN)) * X_N
    x = np.round(np.clip(x, 0, X_N)).astype(int)
    sigma = np.log(sigma / np.pi)
    sigma = (sigma - np.log(SIGMA_MIN)) / (np.log(SIGMA_MAX) - np.log(SIGMA_MIN)) * SIGMA_N
    sigma = np.round(np.clip(sigma, 0, SIGMA_N)).astype(int)
    return p_[sigma, x]
def sample(sigma):
    """Draw one wrapped-Gaussian sample per entry of `sigma`, folded into
    the interval [-pi, pi)."""
    noise = np.random.randn(*sigma.shape) * sigma
    return (noise + np.pi) % (2 * np.pi) - np.pi
# Monte-Carlo estimate (10000 samples per sigma bin) of E[score^2] over the
# wrapped-normal distribution — computed once at import time and used by
# `score_norm` below to normalise the torus score.
score_norm_ = score(
    sample(sigma[None].repeat(10000, 0).flatten()),
    sigma[None].repeat(10000, 0).flatten()
).reshape(10000, -1)
score_norm_ = (score_norm_ ** 2).mean(0)
def score_norm(sigma):
    """Look up E[score^2] for the given sigma(s) from the Monte-Carlo table
    computed at import time (nearest log-spaced sigma bin, clipped)."""
    sigma = np.log(sigma / np.pi)
    sigma = (sigma - np.log(SIGMA_MIN)) / (np.log(SIGMA_MAX) - np.log(SIGMA_MIN)) * SIGMA_N
    sigma = np.round(np.clip(sigma, 0, SIGMA_N)).astype(int)
    return score_norm_[sigma]
| Python |
3D | kuangxh9/SuperWater | utils/utils.py | .py | 9,726 | 245 | import os
import subprocess
import warnings
from datetime import datetime
import signal
from contextlib import contextmanager
import numpy as np
import torch
import yaml
from rdkit import Chem
from rdkit.Chem import RemoveHs, MolToPDBFile
from torch_geometric.nn.data_parallel import DataParallel
from torch.nn.parallel import DistributedDataParallel
from models.all_atom_score_model import TensorProductScoreModel as AAScoreModel
from models.all_atom_score_model import RecycleNet as RecycAAScoreModel
from models.score_model import TensorProductScoreModel as CGScoreModel
from utils.diffusion_utils import get_timestep_embedding
from spyrmsd import rmsd, molecule
def get_obrmsd(mol1_path, mol2_path, cache_name=None):
    """Compute RMSD(s) between two molecules using OpenBabel's `obrms` CLI.

    Args:
        mol1_path, mol2_path: a file path, or an RDKit mol (which is then
            written to a cached PDB file first).
        cache_name: basename for the cached .rmsd output; a timestamp is
            used when None.

    Returns:
        np.ndarray of float RMSD values, one per line of obrms output.
    """
    cache_name = datetime.now().strftime('date%d-%m_time%H-%M-%S.%f') if cache_name is None else cache_name
    os.makedirs(".openbabel_cache", exist_ok=True)
    if not isinstance(mol1_path, str):
        MolToPDBFile(mol1_path, '.openbabel_cache/obrmsd_mol1_cache.pdb')
        mol1_path = '.openbabel_cache/obrmsd_mol1_cache.pdb'
    if not isinstance(mol2_path, str):
        MolToPDBFile(mol2_path, '.openbabel_cache/obrmsd_mol2_cache.pdb')
        mol2_path = '.openbabel_cache/obrmsd_mol2_cache.pdb'
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # NOTE(review): shell=True with interpolated paths is fine for trusted
        # local files but would need quoting for untrusted input.
        return_code = subprocess.run(f"obrms {mol1_path} {mol2_path} > .openbabel_cache/obrmsd_{cache_name}.rmsd",
                                     shell=True)
    print(return_code)
    obrms_output = read_strings_from_txt(f".openbabel_cache/obrmsd_{cache_name}.rmsd")
    rmsds = [line.split(" ")[-1] for line in obrms_output]
    # Bug fix: the np.float alias was removed in NumPy 1.20+ (AttributeError on
    # modern NumPy); the builtin float is the documented replacement.
    return np.array(rmsds, dtype=float)
def remove_all_hs(mol):
    """Strip every hydrogen from an RDKit mol, with all RemoveHsParameters
    removal flags switched on."""
    params = Chem.RemoveHsParameters()
    for flag in ('removeAndTrackIsotopes', 'removeDefiningBondStereo',
                 'removeDegreeZero', 'removeDummyNeighbors',
                 'removeHigherDegrees', 'removeHydrides', 'removeInSGroups',
                 'removeIsotopes', 'removeMapped', 'removeNonimplicit',
                 'removeOnlyHNeighbors', 'removeWithQuery',
                 'removeWithWedgedBond'):
        setattr(params, flag, True)
    return RemoveHs(mol, params)
def read_strings_from_txt(path):
    """Read a text file and return its lines with trailing whitespace removed,
    one list element per line."""
    with open(path) as handle:
        return [line.rstrip() for line in handle]
def save_yaml_file(path, content):
    """Serialize `content` to YAML and write it to `path`, creating parent
    directories as needed.

    Args:
        path: destination file path (must be a str).
        content: any structure yaml.dump can serialize.
    """
    assert isinstance(path, str), f'path must be a string, got {path} which is a {type(path)}'
    content = yaml.dump(data=content)
    parent = os.path.dirname(path)
    if parent:
        # exist_ok avoids the check-then-create race of the original
        # `'/' in path and not os.path.exists(...)` test, and also covers
        # paths using OS-specific separators
        os.makedirs(parent, exist_ok=True)
    with open(path, 'w') as f:
        f.write(content)
def get_optimizer_and_scheduler(args, model, scheduler_mode='min'):
    """Build an Adam optimizer over the trainable parameters, plus an optional
    ReduceLROnPlateau scheduler when args.scheduler == 'plateau' (None otherwise)."""
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(trainable_params, lr=args.lr, weight_decay=args.w_decay)
    if args.scheduler == 'plateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode=scheduler_mode, factor=0.7,
            patience=args.scheduler_patience, min_lr=args.lr / 100)
    else:
        print('No scheduler')
        scheduler = None
    return optimizer, scheduler
def get_model(args, device, t_to_sigma, no_parallel=False, confidence_mode=False):
    """Instantiate the score model described by the `args` config namespace.

    Chooses the all-atom or coarse-grained TensorProductScoreModel based on
    args.all_atoms, wires in the timestep embedding and (optionally) ESM
    language-model embeddings, wraps in DataParallel on CUDA unless
    `no_parallel`, and moves the model to `device`.

    Args:
        args: config namespace; must support `in` membership tests
            ('all_atoms', 'rmsd_classification_cutoff') as well as attribute access.
        device: torch.device the model ends up on.
        t_to_sigma: callable mapping diffusion time to noise scale.
        no_parallel: skip DataParallel wrapping even on CUDA.
        confidence_mode: build the model as a confidence predictor.
    """
    if 'all_atoms' in args and args.all_atoms:
        model_class = AAScoreModel
    else:
        model_class = CGScoreModel
    timestep_emb_func = get_timestep_embedding(
        embedding_type=args.embedding_type,
        embedding_dim=args.sigma_embed_dim,
        embedding_scale=args.embedding_scale)
    lm_embedding_type = None
    if args.esm_embeddings_path is not None: lm_embedding_type = 'esm'
    model = model_class(t_to_sigma=t_to_sigma,
                        device=device,
                        timestep_emb_func=timestep_emb_func,
                        num_conv_layers=args.num_conv_layers,
                        lig_max_radius=args.max_radius,
                        scale_by_sigma=args.scale_by_sigma,
                        sigma_embed_dim=args.sigma_embed_dim,
                        ns=args.ns, nv=args.nv,
                        distance_embed_dim=args.distance_embed_dim,
                        cross_distance_embed_dim=args.cross_distance_embed_dim,
                        batch_norm=not args.no_batch_norm,
                        dropout=args.dropout,
                        use_second_order_repr=args.use_second_order_repr,
                        cross_max_distance=args.cross_max_distance,
                        dynamic_max_cross=args.dynamic_max_cross,
                        lm_embedding_type=lm_embedding_type,
                        confidence_mode=confidence_mode,
                        # one output per RMSD class (cutoffs + 1) when a cutoff
                        # list is configured, otherwise a single output
                        num_confidence_outputs=len(
                            args.rmsd_classification_cutoff) + 1 if 'rmsd_classification_cutoff' in args and isinstance(
                            args.rmsd_classification_cutoff, list) else 1)
    if device.type == 'cuda' and not no_parallel:
        model = DataParallel(model)
    model.to(device)
    return model
def get_symmetry_rmsd(mol, coords1, coords2, mol2=None):
    """Symmetry-corrected RMSD between two coordinate sets via spyrmsd.

    Args:
        mol: RDKit mol providing atomic numbers and adjacency for coords1
            (and for coords2 when mol2 is None).
        coords1, coords2: coordinate arrays to compare.
        mol2: optional RDKit mol describing coords2.

    Raises:
        TimeoutException: if the computation exceeds 10 seconds
            (SIGALRM-based, so Unix main-thread only).
    """
    with time_limit(10):
        mol = molecule.Molecule.from_rdkit(mol)
        mol2 = molecule.Molecule.from_rdkit(mol2) if mol2 is not None else mol2
        mol2_atomicnums = mol2.atomicnums if mol2 is not None else mol.atomicnums
        mol2_adjacency_matrix = mol2.adjacency_matrix if mol2 is not None else mol.adjacency_matrix
        RMSD = rmsd.symmrmsd(
            coords1,
            coords2,
            mol.atomicnums,
            mol2_atomicnums,
            mol.adjacency_matrix,
            mol2_adjacency_matrix,
        )
    return RMSD
# Raised by `time_limit` when the wrapped block exceeds its wall-clock budget.
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
    """Context manager that raises TimeoutException if its body runs longer
    than `seconds` of wall-clock time.

    Relies on SIGALRM, so it only works on Unix and only in the main thread.
    """
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    # Bug fix: remember the previous SIGALRM handler so nested uses or other
    # SIGALRM clients are not permanently clobbered.
    previous_handler = signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
        # restore whatever handler was installed before entering the block
        signal.signal(signal.SIGALRM, previous_handler)
class ExponentialMovingAverage:
    """ from https://github.com/yang-song/score_sde_pytorch/blob/main/models/ema.py
    Maintains (exponential) moving average of a set of parameters. """
    def __init__(self, parameters, decay, use_num_updates=True):
        """
        Args:
            parameters: Iterable of `torch.nn.Parameter`; usually the result of
                `model.parameters()`.
            decay: The exponential decay.
            use_num_updates: Whether to use number of updates when computing
                averages.
        """
        if decay < 0.0 or decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')
        self.decay = decay
        # num_updates is None when warm-up is disabled
        self.num_updates = 0 if use_num_updates else None
        # only trainable parameters are tracked; shadow copies are detached
        self.shadow_params = [p.clone().detach()
                              for p in parameters if p.requires_grad]
        self.collected_params = []
    def update(self, parameters):
        """
        Update currently maintained parameters.
        Call this every time the parameters are updated, such as the result of
        the `optimizer.step()` call.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; usually the same set of
                parameters used to initialize this object.
        """
        decay = self.decay
        if self.num_updates is not None:
            self.num_updates += 1
            # warm-up: effective decay ramps from ~0.2 toward self.decay so early
            # averages are not dominated by the random initialisation
            decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
        one_minus_decay = 1.0 - decay
        with torch.no_grad():
            parameters = [p for p in parameters if p.requires_grad]
            # shadow <- shadow - (1 - decay) * (shadow - param), i.e. an EMA step
            for s_param, param in zip(self.shadow_params, parameters):
                s_param.sub_(one_minus_decay * (s_param - param))
    def copy_to(self, parameters):
        """
        Copy current parameters into given collection of parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored moving averages.
        """
        parameters = [p for p in parameters if p.requires_grad]
        for s_param, param in zip(self.shadow_params, parameters):
            if param.requires_grad:
                param.data.copy_(s_param.data)
    def store(self, parameters):
        """
        Save the current parameters for restoring later.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]
    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.
        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored parameters.
        """
        for c_param, param in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)
    def state_dict(self):
        # shadow_params are returned by reference, not copied
        return dict(decay=self.decay, num_updates=self.num_updates,
                    shadow_params=self.shadow_params)
    def load_state_dict(self, state_dict, device):
        self.decay = state_dict['decay']
        self.num_updates = state_dict['num_updates']
        self.shadow_params = [tensor.to(device) for tensor in state_dict['shadow_params']]
| Python |
3D | kuangxh9/SuperWater | utils/sampling.py | .py | 4,588 | 92 | import numpy as np
import torch
from torch_geometric.loader import DataLoader
from utils.diffusion_utils import modify_conformer, set_time
from utils.torsion import modify_conformer_torsion_angles
from scipy.spatial.transform import Rotation as R
# from utils.visualise import save_water_to_pdb_file
import os
def randomize_position_multiple(data_list, no_random, tr_sigma_max, water_num = 100):
    """Re-initialise each graph's water ('ligand') store with `water_num` points.

    Positions are drawn i.i.d. from N(0, tr_sigma_max^2), node features are the
    first row of the existing feature matrix tiled `water_num` times, and batch
    indices are zeroed. No-op when `no_random` is True.
    """
    for complex_graph in data_list:
        if no_random:
            continue
        waters = complex_graph['ligand']
        waters.pos = torch.normal(mean=0, std=tr_sigma_max, size=(water_num, 3))
        waters.x = waters.x[0, :].repeat(water_num, 1)
        waters.batch = torch.zeros(water_num, dtype=torch.int)
def sampling(data_list, model, inference_steps, tr_schedule, device, t_to_sigma, model_args,
             no_random=False, ode=False, save_visualization=False, visualisation_dir='inference_out/diff_process', confidence_model=None, confidence_data_list=None,
             confidence_model_args=None, batch_size=32, no_final_step_noise=False):
    """Run reverse-diffusion sampling of water positions for a list of complexes.

    At each of `inference_steps` time steps the score model predicts a
    translational score for every water node; positions are moved with either
    a deterministic probability-flow step (`ode=True`) or a stochastic reverse
    SDE step. Optionally, a confidence model scores the final poses.

    Args:
        data_list: list of HeteroData complex graphs; replaced step by step
            with updated copies and returned.
        model: score model returning `(tr_score, _, _)` for a batch.
        inference_steps: number of denoising steps.
        tr_schedule: sequence of diffusion times, one per step (the last step
            uses its own t as the remaining dt).
        device: device the models are evaluated on.
        t_to_sigma: maps diffusion time t to the translational sigma.
        model_args: namespace with `tr_sigma_max`, `tr_sigma_min`, `all_atoms`.
        no_random: use zero noise in every step (deterministic SDE updates).
        ode: use the probability-flow ODE update instead of the SDE one.
        save_visualization: append intermediate poses to per-complex PDB files.
        visualisation_dir: output directory for those PDB files.
        confidence_model: optional confidence scorer for the final poses.
        confidence_data_list: optional differently-preprocessed graphs for the
            confidence model; sampled positions are copied into them.
        confidence_model_args: namespace with `all_atoms` for the confidence model.
        batch_size: complexes per inference batch.
        no_final_step_noise: suppress the noise term on the last step only.

    Returns:
        (data_list, confidence): the denoised graphs and a concatenated
        confidence tensor, or None when no confidence model is given.
    """
    N = len(data_list)
    for t_idx in range(inference_steps):
        t_tr = tr_schedule[t_idx]
        # Step size; the final step consumes all remaining time t.
        dt_tr = tr_schedule[t_idx] - tr_schedule[t_idx + 1] if t_idx < inference_steps - 1 else tr_schedule[t_idx]
        loader = DataLoader(data_list, batch_size=batch_size)
        new_data_list = []
        for complex_graph_batch in loader:
            b = complex_graph_batch.num_graphs
            complex_graph_batch = complex_graph_batch.to(device)
            tr_sigma = t_to_sigma(t_tr)
            set_time(complex_graph_batch, t_tr, b, model_args.all_atoms, device)
            with torch.no_grad():
                tr_score, _, _ = model(complex_graph_batch)
            if torch.isnan(tr_score).any():
                # NOTE(review): debug breadcrumb only — NaN scores are printed, not handled.
                print("stop")
            # Diffusion coefficient g(t) of the VE translational SDE.
            tr_g = tr_sigma * torch.sqrt(torch.tensor(2 * np.log(model_args.tr_sigma_max / model_args.tr_sigma_min)))
            if ode:
                # Deterministic probability-flow update.
                tr_perturb = (0.5 * tr_g ** 2 * dt_tr * tr_score.cpu()).cpu()
            else:
                # Reverse-SDE update; noise suppressed when requested.
                tr_z = torch.zeros((len(tr_score), 3)) if no_random or (no_final_step_noise and t_idx == inference_steps - 1) \
                    else torch.normal(mean=0, std=1, size=(len(tr_score), 3))
                tr_perturb = (tr_g ** 2 * dt_tr * tr_score.cpu() + tr_g * np.sqrt(dt_tr) * tr_z).cpu()
            # Apply each complex's slice of the batched perturbation.
            for i, complex_graph in enumerate(complex_graph_batch.to('cpu').to_data_list()):
                num_water = complex_graph['ligand'].pos.shape[0]
                new_data_list.extend([modify_conformer(complex_graph, tr_perturb[i*num_water:i*num_water + num_water])])
        data_list = new_data_list
        if save_visualization:
            os.makedirs(visualisation_dir, exist_ok=True)
            for complex_graph in data_list:
                file_path = f"{visualisation_dir}/water_{complex_graph['name'][0]}_L{inference_steps}.pdb"
                save_water_to_pdb_file(complex_graph, file_path, t_idx)
    with torch.no_grad():
        if confidence_model is not None:
            loader = DataLoader(data_list, batch_size=batch_size)
            confidence_loader = iter(DataLoader(confidence_data_list, batch_size=batch_size))
            confidence = []
            for complex_graph_batch in loader:
                complex_graph_batch = complex_graph_batch.to(device)
                if confidence_data_list is not None:
                    # The confidence model may expect differently-preprocessed
                    # graphs; copy the sampled positions into them before scoring.
                    confidence_complex_graph_batch = next(confidence_loader).to(device)
                    confidence_complex_graph_batch['ligand'].pos = complex_graph_batch['ligand'].pos
                    # NOTE(review): uses N (full dataset size) rather than the
                    # batch's graph count — confirm intended for partial batches.
                    set_time(confidence_complex_graph_batch, 0, N, confidence_model_args.all_atoms, device)
                    confidence.append(confidence_model(confidence_complex_graph_batch))
                else:
                    confidence.append(confidence_model(complex_graph_batch))
            confidence = torch.cat(confidence, dim=0)
        else:
            confidence = None
    return data_list, confidence
def save_water_to_pdb_file(complex_graph, file_path, model_idx):
    """Append one MODEL of predicted water oxygens to a multi-model PDB file.

    Fixes vs. the previous version:
      * each water now gets its own residue number (the old code wrote
        residue 1 for every atom via `{1:>4}`, collapsing all waters into a
        single malformed HOH residue);
      * `model_idx` is now written as the MODEL serial instead of being
        ignored, so the per-step frames in a trajectory file are identifiable.

    Args:
        complex_graph: graph with `['ligand'].pos` (centered water positions)
            and `original_center` (translation back to crystal coordinates).
        file_path: PDB file to append to (created if missing).
        model_idx: MODEL serial number for this frame.
    """
    # Shift predictions from the model's centered frame back to the
    # original crystal coordinates.
    water_coords = complex_graph['ligand'].pos + complex_graph.original_center
    with open(file_path, "a") as pdb_file:
        pdb_file.write(f"MODEL     {model_idx:>4}\n")
        for i, coords in enumerate(water_coords, start=1):
            # One HETATM oxygen per water; serial and resSeq both run 1..n.
            line = (f'HETATM{i:>5}  O   HOH A{i:>4}    '
                    f'{coords[0]:8.3f}{coords[1]:8.3f}{coords[2]:8.3f}'
                    f'  1.00  0.00           O\n')
            pdb_file.write(line)
        pdb_file.write("ENDMDL\n")
3D | kuangxh9/SuperWater | utils/torsion.py | .py | 3,606 | 94 | import networkx as nx
import numpy as np
import torch, copy
from scipy.spatial.transform import Rotation as R
from torch_geometric.utils import to_networkx
from torch_geometric.data import Data
"""
Preprocessing and computation for torsional updates to conformers
"""
def get_transformation_mask(pyg_data):
    """Identify rotatable bonds in a ligand graph and the atoms each one moves.

    Edges are assumed to be stored as consecutive forward/backward pairs
    (asserted below). A bond is rotatable iff deleting it disconnects the
    undirected graph into two components each larger than one atom; the
    smaller component is the side that rotates, attached to whichever edge
    direction points into it.

    Args:
        pyg_data: HeteroData with ('ligand', 'ligand') edges.

    Returns:
        mask_edges: bool array over directed edges, True for rotatable bonds.
        mask_rotate: (num_rotatable, num_atoms) bool array; row k marks the
            atoms moved by the k-th rotatable edge.
    """
    G = to_networkx(pyg_data.to_homogeneous(), to_undirected=False)
    to_rotate = []
    edges = pyg_data['ligand', 'ligand'].edge_index.T.numpy()
    for i in range(0, edges.shape[0], 2):
        # Edge i must be the reverse of edge i+1: (u, v) followed by (v, u).
        assert edges[i, 0] == edges[i+1, 1]
        G2 = G.to_undirected()
        G2.remove_edge(*edges[i])
        if not nx.is_connected(G2):
            # Smallest component = the side that rotates.
            l = list(sorted(nx.connected_components(G2), key=len)[0])
            if len(l) > 1:
                # Assign the rotating side to the edge direction that points at it.
                if edges[i, 0] in l:
                    to_rotate.append([])
                    to_rotate.append(l)
                else:
                    to_rotate.append(l)
                    to_rotate.append([])
                continue
        # Not rotatable: neither direction moves anything.
        to_rotate.append([])
        to_rotate.append([])
    mask_edges = np.asarray([0 if len(l) == 0 else 1 for l in to_rotate], dtype=bool)
    mask_rotate = np.zeros((np.sum(mask_edges), len(G.nodes())), dtype=bool)
    idx = 0
    for i in range(len(G.edges())):
        if mask_edges[i]:
            # Row idx of mask_rotate marks the atoms rotated by directed edge i.
            mask_rotate[idx][np.asarray(to_rotate[i], dtype=int)] = True
            idx += 1
    return mask_edges, mask_rotate
def modify_conformer_torsion_angles(pos, edge_index, mask_rotate, torsion_updates, as_numpy=False):
pos = copy.deepcopy(pos)
if type(pos) != np.ndarray: pos = pos.cpu().numpy()
for idx_edge, e in enumerate(edge_index.cpu().numpy()):
if torsion_updates[idx_edge] == 0:
continue
u, v = e[0], e[1]
# check if need to reverse the edge, v should be connected to the part that gets rotated
assert not mask_rotate[idx_edge, u]
assert mask_rotate[idx_edge, v]
rot_vec = pos[u] - pos[v] # convention: positive rotation if pointing inwards
rot_vec = rot_vec * torsion_updates[idx_edge] / np.linalg.norm(rot_vec) # idx_edge!
rot_mat = R.from_rotvec(rot_vec).as_matrix()
pos[mask_rotate[idx_edge]] = (pos[mask_rotate[idx_edge]] - pos[v]) @ rot_mat.T + pos[v]
if not as_numpy: pos = torch.from_numpy(pos.astype(np.float32))
return pos
def perturb_batch(data, torsion_updates, split=False, return_updates=False):
    """Apply per-bond torsion updates to a single graph or a whole batch.

    A plain `Data` object is handled directly. For a batch, the flat
    `torsion_updates` vector is sliced per graph using each graph's
    `mask_rotate` shape, and the corresponding node block is rotated.

    Args:
        data: `Data` graph or a batch exposing `pos`, `edge_index`,
            `edge_mask`, `mask_rotate`.
        torsion_updates: flat array of angles, one per rotatable bond.
        split: return a list of per-graph position tensors instead of one
            concatenated tensor.
        return_updates: additionally return the per-graph update slices.

    Returns:
        New positions (list or tensor), optionally with the update slices.
    """
    if type(data) is Data:
        # Single graph: delegate straight to the torsion routine.
        return modify_conformer_torsion_angles(data.pos,
                                               data.edge_index.T[data.edge_mask],
                                               data.mask_rotate, torsion_updates)
    rotatable_edges = data.edge_index.T[data.edge_mask]
    new_positions = [] if split else copy.deepcopy(data.pos)
    node_offset = 0
    edge_offset = 0
    applied_updates = []
    for mask_rotate in data.mask_rotate:
        n_edges, n_nodes = mask_rotate.shape[0], mask_rotate.shape[1]
        graph_pos = data.pos[node_offset:node_offset + n_nodes]
        # Re-index this graph's edges so they are local to its node block.
        graph_edges = rotatable_edges[edge_offset:edge_offset + n_edges] - node_offset
        graph_updates = torsion_updates[edge_offset:edge_offset + n_edges]
        applied_updates.append(graph_updates)
        moved = modify_conformer_torsion_angles(graph_pos, graph_edges, mask_rotate, graph_updates)
        if split:
            new_positions.append(moved)
        else:
            new_positions[node_offset:node_offset + n_nodes] = moved
        node_offset += n_nodes
        edge_offset += n_edges
    if return_updates:
        return new_positions, applied_updates
    return new_positions
3D | kuangxh9/SuperWater | utils/parsing.py | .py | 23,272 | 267 |
from argparse import ArgumentParser,FileType
def parse_train_args():
    """Parse command-line arguments for training the score model.

    Returns:
        argparse.Namespace with general, training, dataset, diffusion and
        score-model hyperparameters (see the individual help strings).
    """
    # General arguments
    parser = ArgumentParser()
    parser.add_argument('--config', type=FileType(mode='r'), default=None)
    parser.add_argument('--log_dir', type=str, default='workdir', help='Folder in which to save model and logs')
    parser.add_argument('--restart_dir', type=str, help='Folder of previous training model from which to restart')
    parser.add_argument('--cache_path', type=str, default='data/cache', help='Folder from where to load/restore cached dataset')
    parser.add_argument('--data_dir', type=str, default='data/waterbind/', help='Folder containing original structures')
    parser.add_argument('--split_train', type=str, default='data/splits/train_res15.txt', help='Path of file defining the split')
    parser.add_argument('--split_val', type=str, default='data/splits/val_res15.txt', help='Path of file defining the split')
    # NOTE(review): default lacks the .txt extension unlike train/val — confirm intended.
    parser.add_argument('--split_test', type=str, default='data/splits/test_res15', help='Path of file defining the split')
    parser.add_argument('--test_sigma_intervals', action='store_true', default=False, help='Whether to log loss per noise interval')
    parser.add_argument('--val_inference_freq', type=int, default=None, help='Frequency of epochs for which to run expensive inference on val data')
    parser.add_argument('--train_inference_freq', type=int, default=None, help='Frequency of epochs for which to run expensive inference on train data')
    parser.add_argument('--inference_steps', type=int, default=20, help='Number of denoising steps for inference on val')
    parser.add_argument('--inference_earlystop_goal', type=str, default='max', help='Whether to maximize or minimize metric')
    parser.add_argument('--wandb', action='store_true', default=False, help='')
    parser.add_argument('--project', type=str, default='superwater_train', help='')
    parser.add_argument('--run_name', type=str, default='', help='')
    parser.add_argument('--cudnn_benchmark', action='store_true', default=False, help='CUDA optimization parameter for faster training')
    parser.add_argument('--num_dataloader_workers', type=int, default=0, help='Number of workers for dataloader')
    parser.add_argument('--pin_memory', action='store_true', default=False, help='pin_memory arg of dataloader')
    # Training arguments
    parser.add_argument('--n_epochs', type=int, default=400, help='Number of epochs for training')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size')
    parser.add_argument('--scheduler', type=str, default=None, help='LR scheduler')
    parser.add_argument('--scheduler_patience', type=int, default=20, help='Patience of the LR scheduler')
    parser.add_argument('--lr', type=float, default=1e-3, help='Initial learning rate')
    parser.add_argument('--restart_lr', type=float, default=None, help='If this is not none, the lr of the optimizer will be overwritten with this value when restarting from a checkpoint.')
    parser.add_argument('--w_decay', type=float, default=0.0, help='Weight decay added to loss')
    parser.add_argument('--num_workers', type=int, default=1, help='Number of workers for preprocessing')
    parser.add_argument('--use_ema', action='store_true', default=False, help='Whether or not to use ema for the model weights')
    parser.add_argument('--ema_rate', type=float, default=0.999, help='decay rate for the exponential moving average model parameters ')
    # Dataset
    parser.add_argument('--limit_complexes', type=int, default=0, help='If positive, the number of training and validation complexes is capped')
    parser.add_argument('--all_atoms', action='store_true', default=False, help='Whether to use the all atoms model')
    parser.add_argument('--receptor_radius', type=float, default=30, help='Cutoff on distances for receptor edges')
    parser.add_argument('--c_alpha_max_neighbors', type=int, default=10, help='Maximum number of neighbors for each residue')
    parser.add_argument('--atom_radius', type=float, default=5, help='Cutoff on distances for atom connections')
    parser.add_argument('--atom_max_neighbors', type=int, default=8, help='Maximum number of atom neighbours for receptor')
    parser.add_argument('--matching_popsize', type=int, default=20, help='Differential evolution popsize parameter in matching')
    parser.add_argument('--matching_maxiter', type=int, default=20, help='Differential evolution maxiter parameter in matching')
    parser.add_argument('--max_lig_size', type=int, default=None, help='Maximum number of heavy atoms in ligand')
    parser.add_argument('--remove_hs', action='store_true', default=False, help='remove Hs')
    parser.add_argument('--num_conformers', type=int, default=1, help='Number of conformers to match to each ligand')
    parser.add_argument('--esm_embeddings_path', type=str, default=None, help='If this is set then the LM embeddings at that path will be used for the receptor features')
    # Diffusion
    parser.add_argument('--tr_weight', type=float, default=1, help='Weight of translation loss')
    parser.add_argument('--tr_sigma_min', type=float, default=0.1, help='Minimum sigma for translational component')
    parser.add_argument('--tr_sigma_max', type=float, default=30, help='Maximum sigma for translational component')
    # Score Model
    parser.add_argument('--num_conv_layers', type=int, default=2, help='Number of interaction layers')
    parser.add_argument('--max_radius', type=float, default=5.0, help='Radius cutoff for geometric graph')
    # NOTE(review): store_true with default=True — the flag can never be disabled from the CLI.
    parser.add_argument('--scale_by_sigma', action='store_true', default=True, help='Whether to normalise the score')
    parser.add_argument('--ns', type=int, default=16, help='Number of hidden features per node of order 0')
    parser.add_argument('--nv', type=int, default=4, help='Number of hidden features per node of order >0')
    parser.add_argument('--distance_embed_dim', type=int, default=32, help='Embedding size for the distance')
    parser.add_argument('--cross_distance_embed_dim', type=int, default=32, help='Embeddings size for the cross distance')
    parser.add_argument('--no_batch_norm', action='store_true', default=False, help='If set, it removes the batch norm')
    parser.add_argument('--use_second_order_repr', action='store_true', default=False, help='Whether to use only up to first order representations or also second')
    parser.add_argument('--cross_max_distance', type=float, default=80, help='Maximum cross distance in case not dynamic')
    parser.add_argument('--dynamic_max_cross', action='store_true', default=False, help='Whether to use the dynamic distance cutoff')
    parser.add_argument('--dropout', type=float, default=0.0, help='MLP dropout')
    parser.add_argument('--embedding_type', type=str, default="sinusoidal", help='Type of diffusion time embedding')
    parser.add_argument('--sigma_embed_dim', type=int, default=32, help='Size of the embedding of the diffusion time')
    parser.add_argument('--embedding_scale', type=int, default=1000, help='Parameter of the diffusion time embedding')
    args = parser.parse_args()
    return args
def parse_confidence_args():
    """Parse command-line arguments for training the confidence model.

    Returns:
        argparse.Namespace with general, inference-cache, dataset and model
        hyperparameters for the confidence-model training run.
    """
    # General arguments
    parser = ArgumentParser()
    parser.add_argument('--config', type=FileType(mode='r'), default=None)
    parser.add_argument('--original_model_dir', type=str, default='workdir', help='Path to folder with trained model and hyperparameters')
    parser.add_argument('--original_pdb_dir', type=str, default='data/waterbind', help='Path to folder with original PDB file downloaded from PDB website')
    parser.add_argument('--restart_dir', type=str, default=None, help='')
    parser.add_argument('--use_original_model_cache', action='store_true', default=False, help='If this is true, the same dataset as in the original model will be used. Otherwise, the dataset parameters are used.')
    parser.add_argument('--data_dir', type=str, default='data/waterbind/', help='Folder containing original structures')
    parser.add_argument('--ckpt', type=str, default='best_model.pt', help='Checkpoint to use inside the folder')
    parser.add_argument('--model_save_frequency', type=int, default=0, help='Frequency with which to save the last model. If 0, then only the early stopping criterion best model is saved and overwritten.')
    parser.add_argument('--best_model_save_frequency', type=int, default=0, help='Frequency with which to save the best model. If 0, then only the early stopping criterion best model is saved and overwritten.')
    parser.add_argument('--run_name', type=str, default='test_confidence', help='')
    parser.add_argument('--project', type=str, default='diffwater_confidence', help='')
    parser.add_argument('--split_train', type=str, default='data/splits/train_res15.txt', help='Path of file defining the split')
    parser.add_argument('--split_val', type=str, default='data/splits/val_res15.txt', help='Path of file defining the split')
    parser.add_argument('--split_test', type=str, default='data/splits/test_res15', help='Path of file defining the split')
    # Inference parameters for creating the positions and mads that the confidence predictor will be trained on.
    parser.add_argument('--cache_path', type=str, default='data/cache_confidence', help='Folder from where to load/restore cached dataset')
    parser.add_argument('--cache_ids_to_combine', nargs='+', type=str, default=None, help='')
    parser.add_argument('--cache_creation_id', type=int, default=None, help='number of times that inference is run on the full dataset before concatenating it and coming up with the full confidence dataset')
    parser.add_argument('--wandb', action='store_true', default=False, help='')
    parser.add_argument('--inference_steps', type=int, default=2, help='Number of denoising steps')
    parser.add_argument('--samples_per_complex', type=int, default=1, help='')
    parser.add_argument('--balance', action='store_true', default=False, help='If this is true than we do not force the samples seen during training to be the same amount of negatives as positives')
    parser.add_argument('--mad_prediction', action='store_true', default=False, help='')
    parser.add_argument('--mad_classification_cutoff', type=float, default=1, help='MAD value below which a prediction is considered a postitive. This can also be multiple cutoffs.')
    parser.add_argument('--log_dir', type=str, default='workdir', help='')
    parser.add_argument('--main_metric', type=str, default='confidence_loss', help='Metric to track for early stopping. Mostly [loss, accuracy, ROC AUC]')
    parser.add_argument('--main_metric_goal', type=str, default='min', help='Can be [min, max]')
    parser.add_argument('--transfer_weights', action='store_true', default=False, help='')
    parser.add_argument('--batch_size', type=int, default=5, help='')
    parser.add_argument('--batch_size_preprocessing', type=int, default=1, help='Number of workers')
    parser.add_argument('--lr', type=float, default=1e-3, help='')
    parser.add_argument('--w_decay', type=float, default=0.0, help='')
    parser.add_argument('--scheduler', type=str, default='plateau', help='')
    parser.add_argument('--scheduler_patience', type=int, default=50, help='')
    parser.add_argument('--n_epochs', type=int, default=5, help='')
    # Dataset
    parser.add_argument('--limit_complexes', type=int, default=0, help='')
    parser.add_argument('--all_atoms', action='store_true', default=False, help='')
    parser.add_argument('--multiplicity', type=int, default=1, help='')
    parser.add_argument('--chain_cutoff', type=float, default=10, help='')
    parser.add_argument('--receptor_radius', type=float, default=15, help='')
    parser.add_argument('--c_alpha_max_neighbors', type=int, default=24, help='')
    parser.add_argument('--atom_radius', type=float, default=5, help='')
    parser.add_argument('--atom_max_neighbors', type=int, default=8, help='')
    parser.add_argument('--matching_popsize', type=int, default=20, help='')
    parser.add_argument('--matching_maxiter', type=int, default=20, help='')
    parser.add_argument('--max_lig_size', type=int, default=None, help='Maximum number of heavy atoms')
    parser.add_argument('--remove_hs', action='store_true', default=False, help='remove Hs')
    parser.add_argument('--num_conformers', type=int, default=1, help='')
    parser.add_argument('--esm_embeddings_path', type=str, default=None,help='If this is set then the LM embeddings at that path will be used for the receptor features')
    parser.add_argument('--no_torsion', action='store_true', default=False, help='')
    # Model
    parser.add_argument('--num_conv_layers', type=int, default=2, help='Number of interaction layers')
    parser.add_argument('--max_radius', type=float, default=5.0, help='Radius cutoff for geometric graph')
    # NOTE(review): store_true with default=True — the flag can never be disabled from the CLI.
    parser.add_argument('--scale_by_sigma', action='store_true', default=True, help='Whether to normalise the score')
    parser.add_argument('--ns', type=int, default=16, help='Number of hidden features per node of order 0')
    parser.add_argument('--nv', type=int, default=4, help='Number of hidden features per node of order >0')
    parser.add_argument('--distance_embed_dim', type=int, default=32, help='')
    parser.add_argument('--cross_distance_embed_dim', type=int, default=32, help='')
    parser.add_argument('--no_batch_norm', action='store_true', default=False, help='If set, it removes the batch norm')
    parser.add_argument('--use_second_order_repr', action='store_true', default=False, help='Whether to use only up to first order representations or also second')
    parser.add_argument('--cross_max_distance', type=float, default=80, help='')
    parser.add_argument('--dynamic_max_cross', action='store_true', default=False, help='')
    parser.add_argument('--dropout', type=float, default=0.1, help='MLP dropout')
    parser.add_argument('--embedding_type', type=str, default="sinusoidal", help='')
    parser.add_argument('--sigma_embed_dim', type=int, default=32, help='')
    parser.add_argument('--embedding_scale', type=int, default=10000, help='')
    parser.add_argument('--confidence_no_batchnorm', action='store_true', default=False, help='')
    parser.add_argument('--confidence_dropout', type=float, default=0.0, help='MLP dropout in confidence readout')
    parser.add_argument('--num_workers', type=int, default=1, help='Number of workers')
    parser.add_argument('--running_mode', type=str, default='train', help='')
    parser.add_argument('--water_ratio', type=int, default=15, help='')
    parser.add_argument('--resample_steps', type=int, default=1, help='')
    args = parser.parse_args()
    return args
def parse_inference_args():
    """Parse command-line arguments for running inference/evaluation.

    Returns:
        argparse.Namespace combining score-model, confidence-model, dataset
        and inference-time (clustering, thresholding) hyperparameters.
    """
    # General arguments
    parser = ArgumentParser()
    parser.add_argument('--config', type=FileType(mode='r'), default=None)
    parser.add_argument('--original_model_dir', type=str, default='workdir',
                        help='Path to folder with trained model and hyperparameters')
    parser.add_argument('--confidence_dir', type=str, default='workdir',
                        help='Path to folder with trained confidence model and hyperparameters')
    parser.add_argument('--restart_dir', type=str, default=None, help='')
    parser.add_argument('--use_original_model_cache', action='store_true', default=False,
                        help='If this is true, the same dataset as in the original model will be used. Otherwise, the dataset parameters are used.')
    parser.add_argument('--data_dir', type=str, default='data/waterbind/',
                        help='Folder containing original structures')
    parser.add_argument('--ckpt', type=str, default='best_model.pt', help='Checkpoint to use inside the folder')
    parser.add_argument('--model_save_frequency', type=int, default=0,
                        help='Frequency with which to save the last model. If 0, then only the early stopping criterion best model is saved and overwritten.')
    parser.add_argument('--best_model_save_frequency', type=int, default=0,
                        help='Frequency with which to save the best model. If 0, then only the early stopping criterion best model is saved and overwritten.')
    parser.add_argument('--run_name', type=str, default='inference', help='')
    parser.add_argument('--project', type=str, default='superwater_evalution', help='')
    parser.add_argument('--split_train', type=str, default='data/splits/train_res15.txt', help='Path of file defining the split')
    parser.add_argument('--split_val', type=str, default='data/splits/val_res15.txt', help='Path of file defining the split')
    parser.add_argument('--split_test', type=str, default='data/splits/test_res15', help='Path of file defining the split')
    # Inference parameters for creating the positions and mads that the confidence predictor will be trained on.
    parser.add_argument('--cache_path', type=str, default='data/cacheNew',
                        help='Folder from where to load/restore cached dataset')
    # NOTE(review): help string looks copy-pasted from --mad_classification_cutoff — confirm.
    parser.add_argument('--cache_ids_to_combine', nargs='+', type=str, default='1',
                        help='MAD value below which a prediction is considered a postitive. This can also be multiple cutoffs.')
    parser.add_argument('--cache_creation_id', type=int, default=1,
                        help='number of times that inference is run on the full dataset before concatenating it and coming up with the full confidence dataset')
    parser.add_argument('--wandb', action='store_true', default=False, help='')
    parser.add_argument('--inference_steps', type=int, default=20, help='Number of denoising steps')
    parser.add_argument('--samples_per_complex', type=int, default=1, help='')
    parser.add_argument('--balance', action='store_true', default=False,
                        help='If this is true than we do not force the samples seen during training to be the same amount of negatives as positives')
    parser.add_argument('--mad_prediction', action='store_true', default=False, help='')
    parser.add_argument('--mad_classification_cutoff', type=float, default=2,
                        help='MAD value below which a prediction is considered a postitive. This can also be multiple cutoffs.')
    parser.add_argument('--log_dir', type=str, default='workdir', help='')
    parser.add_argument('--main_metric', type=str, default='accuracy',
                        help='Metric to track for early stopping. Mostly [loss, accuracy, ROC AUC]')
    parser.add_argument('--main_metric_goal', type=str, default='max', help='Can be [min, max]')
    parser.add_argument('--transfer_weights', action='store_true', default=False, help='')
    parser.add_argument('--batch_size', type=int, default=1, help='')
    parser.add_argument('--batch_size_preprocessing', type=int, default=1, help='Number of workers')
    parser.add_argument('--lr', type=float, default=1e-3, help='')
    parser.add_argument('--w_decay', type=float, default=0.0, help='')
    parser.add_argument('--scheduler', type=str, default='plateau', help='')
    parser.add_argument('--scheduler_patience', type=int, default=20, help='')
    parser.add_argument('--n_epochs', type=int, default=1, help='')
    # Dataset
    parser.add_argument('--limit_complexes', type=int, default=0, help='')
    parser.add_argument('--all_atoms', action='store_true', default=False, help='')
    parser.add_argument('--multiplicity', type=int, default=1, help='')
    parser.add_argument('--chain_cutoff', type=float, default=10, help='')
    parser.add_argument('--receptor_radius', type=float, default=30, help='')
    parser.add_argument('--c_alpha_max_neighbors', type=int, default=10, help='')
    parser.add_argument('--atom_radius', type=float, default=5, help='')
    parser.add_argument('--atom_max_neighbors', type=int, default=8, help='')
    parser.add_argument('--matching_popsize', type=int, default=20, help='')
    parser.add_argument('--matching_maxiter', type=int, default=20, help='')
    parser.add_argument('--max_lig_size', type=int, default=None, help='Maximum number of heavy atoms')
    parser.add_argument('--remove_hs', action='store_true', default=False, help='remove Hs')
    parser.add_argument('--num_conformers', type=int, default=1, help='')
    parser.add_argument('--esm_embeddings_path', type=str, default=None,
                        help='If this is set then the LM embeddings at that path will be used for the receptor features')
    parser.add_argument('--no_torsion', action='store_true', default=False, help='')
    # Model
    parser.add_argument('--num_conv_layers', type=int, default=2, help='Number of interaction layers')
    parser.add_argument('--max_radius', type=float, default=5.0, help='Radius cutoff for geometric graph')
    # NOTE(review): store_true with default=True — the flag can never be disabled from the CLI.
    parser.add_argument('--scale_by_sigma', action='store_true', default=True, help='Whether to normalise the score')
    parser.add_argument('--ns', type=int, default=16, help='Number of hidden features per node of order 0')
    parser.add_argument('--nv', type=int, default=4, help='Number of hidden features per node of order >0')
    parser.add_argument('--distance_embed_dim', type=int, default=32, help='')
    parser.add_argument('--cross_distance_embed_dim', type=int, default=32, help='')
    parser.add_argument('--no_batch_norm', action='store_true', default=False, help='If set, it removes the batch norm')
    parser.add_argument('--use_second_order_repr', action='store_true', default=False,
                        help='Whether to use only up to first order representations or also second')
    parser.add_argument('--cross_max_distance', type=float, default=80, help='')
    parser.add_argument('--dynamic_max_cross', action='store_true', default=False, help='')
    parser.add_argument('--dropout', type=float, default=0.0, help='MLP dropout')
    parser.add_argument('--embedding_type', type=str, default="sinusoidal", help='')
    parser.add_argument('--sigma_embed_dim', type=int, default=32, help='')
    parser.add_argument('--embedding_scale', type=int, default=10000, help='')
    parser.add_argument('--confidence_no_batchnorm', action='store_true', default=False, help='')
    parser.add_argument('--confidence_dropout', type=float, default=0.0, help='MLP dropout in confidence readout')
    parser.add_argument('--num_workers', type=int, default=1, help='Number of workers')
    # Inference
    parser.add_argument('--cap', type=float, default=0.1, help='confidence model prob threshold')
    parser.add_argument('--save_pos', action='store_true', default=False, help='')
    parser.add_argument('--cluster_eps', type=float, default=1, help='')
    parser.add_argument('--cluster_min_samples', type=int, default=1, help='')
    parser.add_argument('--running_mode', type=str, default="test")
    parser.add_argument('--water_ratio', type=int, default=15, help='')
    parser.add_argument('--resample_steps', type=int, default=1, help='')
    parser.add_argument('--use_sigmoid', action='store_true', default=False, help='')
    parser.add_argument('--save_visualization', action='store_true', default=False, help='')
    args = parser.parse_args()
    return args
3D | kuangxh9/SuperWater | confidence/dataset.py | .py | 15,205 | 287 | import itertools
import math
import os
import pickle
import random
from argparse import Namespace
from functools import partial
import copy
from scipy.spatial import cKDTree
import time
import numpy as np
import pandas as pd
import torch
import yaml
from torch_geometric.data import Dataset, Data
from torch_geometric.loader import DataLoader
from tqdm import tqdm
from datasets.pdbbind import PDBBind
from utils.diffusion_utils import get_t_schedule
from utils.utils import get_model
from utils.diffusion_utils import t_to_sigma as t_to_sigma_compl
from utils.sampling import sampling, randomize_position_multiple
from utils.nearest_point_dist import get_nearest_point_distances
from utils.find_water_pos import find_real_water_pos
class ListDataset(Dataset):
    """Minimal torch_geometric Dataset backed by an in-memory list of graphs."""

    def __init__(self, list):
        # NOTE(review): parameter name shadows the builtin `list`; kept as-is
        # for backward compatibility with keyword callers.
        super().__init__()
        self.data_list = list

    def len(self) -> int:
        # Number of graphs held.
        return len(self.data_list)

    def get(self, idx: int) -> Data:
        # Return the idx-th graph unchanged (no transforms applied).
        return self.data_list[idx]
def get_cache_path(args, split):
    """Build the cache directory path for a preprocessed dataset split.

    The path encodes every preprocessing hyperparameter that affects the
    cached graphs, so different settings never collide on disk.

    Args:
        args: namespace with cache/dataset options (cache_path, all_atoms,
            split_train/split_val, limit_complexes, max_lig_size, remove_hs,
            receptor_radius, c_alpha_max_neighbors, atom_radius,
            atom_max_neighbors, esm_embeddings_path).
        split: 'train' selects args.split_train; anything else selects
            args.split_val.

    Returns:
        The cache directory path as a string.
    """
    base = args.cache_path
    base += '_torsion'
    if args.all_atoms:
        base += '_allatoms'
    split_file = args.split_train if split == 'train' else args.split_val
    split_tag = os.path.splitext(os.path.basename(split_file))[0]
    folder = (f'limit{args.limit_complexes}_INDEX{split_tag}'
              f'_maxLigSize{args.max_lig_size}_H{int(not args.remove_hs)}'
              f'_recRad{args.receptor_radius}_recMax{args.c_alpha_max_neighbors}')
    if args.all_atoms:
        folder += f'_atomRad{args.atom_radius}_atomMax{args.atom_max_neighbors}'
    if args.esm_embeddings_path is not None:
        folder += '_esmEmbeddings'
    return os.path.join(base, folder)
def get_args(original_model_dir):
    """Load the hyperparameters saved alongside a trained model.

    Args:
        original_model_dir: folder containing `model_parameters.yml`.

    Returns:
        argparse.Namespace built from the YAML mapping.
    """
    with open(f'{original_model_dir}/model_parameters.yml') as fh:
        return Namespace(**yaml.full_load(fh))
def find_tp_coords(real_water_pos, predicted_water_pos, threshold=1.0):
    """Return the predicted water positions lying within `threshold` of any real water.

    Args:
        real_water_pos: (n_real, 3) array of ground-truth water coordinates.
        predicted_water_pos: (n_pred, 3) array of predicted coordinates.
        threshold: distance cutoff (same units as the coordinates) for a
            prediction to count as a true positive.

    Returns:
        (n_tp, 3) array: the subset of predictions with at least one real
        water inside the cutoff (empty when there are no matches or no
        predictions).
    """
    tree = cKDTree(real_water_pos)
    neighbor_lists = tree.query_ball_point(predicted_water_pos, r=threshold)
    # dtype=bool keeps the mask a valid boolean index even when there are no
    # predictions: np.array([]) would default to float64 and make the
    # fancy-indexing below raise IndexError.
    has_match = np.array([len(hits) > 0 for hits in neighbor_lists], dtype=bool)
    return predicted_water_pos[has_match]
class ConfidenceDataset(Dataset):
    """Dataset for training the confidence model.

    Each item pairs a protein complex graph with water positions generated by
    running reverse diffusion with a pre-trained score model, plus labels
    derived from the MAD (distance of each prediction to the nearest real
    water). Generated positions are cached on disk; several generation runs
    (cache ids) can be combined into one dataset.
    """
    def __init__(self, loader, cache_path, original_model_dir, split, device, limit_complexes,
                 inference_steps, samples_per_complex, all_atoms,
                 args, model_ckpt, balance=False, use_original_model_cache=True, mad_classification_cutoff=2,
                 cache_ids_to_combine=None, cache_creation_id=None, running_mode=None, water_ratio=15, resample_steps=1, save_visualization=False):
        super(ConfidenceDataset, self).__init__()
        self.loader = loader
        self.device = device
        self.inference_steps = inference_steps
        self.limit_complexes = limit_complexes
        self.all_atoms = all_atoms
        self.original_model_dir = original_model_dir
        self.balance = balance
        self.use_original_model_cache = use_original_model_cache
        self.mad_classification_cutoff = mad_classification_cutoff
        self.cache_ids_to_combine = cache_ids_to_combine
        self.cache_creation_id = cache_creation_id
        self.samples_per_complex = samples_per_complex
        self.model_ckpt = model_ckpt
        self.args = args
        self.running_mode = running_mode
        self.water_ratio = water_ratio
        self.resample_steps = resample_steps
        self.save_visualization = save_visualization
        # Arguments the score model was trained with, plus the graph cache built by the loader's dataset.
        self.original_model_args, original_model_cache = get_args(original_model_dir), self.loader.dataset.full_cache_path
        # check if the docked positions have already been computed, if not run the preprocessing (docking every complex)
        self.full_cache_path = os.path.join(cache_path, f'model_{os.path.splitext(os.path.basename(original_model_dir))[0]}'
                                                        f'_split_{split}_limit_{limit_complexes}')
        print("cache path is ", self.full_cache_path)
        if (not os.path.exists(os.path.join(self.full_cache_path, "water_positions.pkl")) and self.cache_creation_id is None) or \
                (not os.path.exists(os.path.join(self.full_cache_path, f"water_positions_id{self.cache_creation_id}.pkl")) and self.cache_creation_id is not None):
            os.makedirs(self.full_cache_path, exist_ok=True)
            self.preprocessing(original_model_cache)
        # Load every cache id and merge them into a single positions/MAD table.
        # NOTE(review): this loop iterates cache_ids_to_combine unconditionally,
        # so the default cache_ids_to_combine=None would raise a TypeError here
        # even though later code guards for None — confirm callers always pass a list.
        all_mads_unsorted, all_full_water_positions_unsorted, all_names_unsorted = [], [], []
        for idx, cache_id in enumerate(self.cache_ids_to_combine):
            print(f'HAPPENING | Loading positions and MADs from cache_id from the path: {os.path.join(self.full_cache_path, "water_positions_"+ str(cache_id)+ ".pkl")}')
            if not os.path.exists(os.path.join(self.full_cache_path, f"water_positions_id{cache_id}.pkl")): raise Exception(f'The generated water positions with cache_id do not exist: {cache_id}') # be careful with changing this error message since it is sometimes cought in a try catch
            with open(os.path.join(self.full_cache_path, f"water_positions_id{cache_id}.pkl"), 'rb') as f:
                full_water_positions, mads = pickle.load(f)
            with open(os.path.join(self.full_cache_path, f"complex_names_in_same_order_id{cache_id}.pkl"), 'rb') as f:
                names_unsorted = pickle.load(f)
            all_names_unsorted.append(names_unsorted)
            all_mads_unsorted.append(mads)
            all_full_water_positions_unsorted.append(full_water_positions)
        # Canonical complex-name order shared by all cache ids (set() dedups; order is arbitrary but consistent below).
        names_order = list(set(sum(all_names_unsorted, [])))
        all_mads, all_full_water_positions, all_names = [], [], []
        for idx, (mads_unsorted, full_water_positions_unsorted, names_unsorted) in enumerate(zip(all_mads_unsorted,all_full_water_positions_unsorted, all_names_unsorted)):
            # NOTE(review): the zip pairs name -> (water_position, mad_value) even though
            # the comprehension calls them `mad` and `pos` in swapped order; the [1]/[0]
            # indices below compensate, so behavior is correct but the naming misleads.
            name_to_pos_dict = {name: (mad, pos) for name, mad, pos in zip(names_unsorted, full_water_positions_unsorted, mads_unsorted) }
            intermediate_mads = [name_to_pos_dict[name][1] for name in names_order]
            all_mads.append((intermediate_mads))
            intermediate_pos = [name_to_pos_dict[name][0] for name in names_order]
            all_full_water_positions.append((intermediate_pos))
        # Concatenate the samples from all cache ids, per complex.
        self.full_water_positions, self.mads = [], []
        for positions_tuple in list(zip(*all_full_water_positions)):
            self.full_water_positions.append(np.concatenate(positions_tuple, axis=0))
        for positions_tuple in list(zip(*all_mads)):
            self.mads.append(np.concatenate(positions_tuple, axis=0))
        generated_mad_complex_names = names_order
        print('Number of complex graphs: ', len(self.loader.dataset))
        print('Number of MADs and positions for the complex graphs: ', len(self.full_water_positions))
        # Total number of generated sample sets per complex across all combined cache ids.
        self.all_samples_per_complex = samples_per_complex * (1 if self.cache_ids_to_combine is None else len(self.cache_ids_to_combine))
        self.positions_mads_dict = {name: (pos, mad) for name, pos, mad in zip (generated_mad_complex_names, self.full_water_positions, self.mads)}
        self.dataset_names = list(self.positions_mads_dict.keys())
        if limit_complexes > 0:
            self.dataset_names = self.dataset_names[:limit_complexes]

    def len(self):
        # Number of complexes with generated samples (possibly truncated by limit_complexes).
        return len(self.dataset_names)

    def get(self, idx):
        """Assemble one example: cached complex graph + sampled water positions + label(s)."""
        complex_name = self.dataset_names[idx]
        # Load the heterograph cached on disk by the PDBBind dataset.
        complex_graph = torch.load(os.path.join(self.loader.dataset.full_cache_path, f"{complex_name}.pt"))
        positions, mads = self.positions_mads_dict[self.dataset_names[idx]]
        assert(complex_graph.name == self.dataset_names[idx])
        # Replicate the last ligand feature row so node features match the number of sampled waters.
        complex_graph['ligand'].x = complex_graph['ligand'].x[-1].repeat(positions.shape[-2], 1)
        if self.balance:
            if isinstance(self.mad_classification_cutoff, list): raise ValueError("a list for --mad_classification_cutoff can only be used without --balance")
            # Draw a balanced binary label, then pick a matching positive/negative sample.
            label = random.randint(0, 1)
            success = mads < self.mad_classification_cutoff
            n_success = np.count_nonzero(success)
            if label == 0 and n_success != self.all_samples_per_complex:
                # sample negative complex
                sample = random.randint(0, self.all_samples_per_complex - n_success - 1)
                lig_pos = positions[~success][sample]
                complex_graph['ligand'].pos = torch.from_numpy(lig_pos)
            else:
                # sample positive complex
                if n_success > 0: # if no successfull sample returns the matched complex
                    sample = random.randint(0, n_success - 1)
                    lig_pos = positions[success][sample]
                    complex_graph['ligand'].pos = torch.from_numpy(lig_pos)
            complex_graph.y = torch.tensor(label).float()
        else:
            # Uniformly pick one generated sample set and label it by the cutoff.
            sample = random.randint(0, self.all_samples_per_complex - 1)
            complex_graph['ligand'].pos = torch.from_numpy(positions[sample])
            complex_graph.y = torch.tensor(mads < self.mad_classification_cutoff).float()
            if isinstance(self.mad_classification_cutoff, list):
                # One-hot bin membership over the MAD intervals [0, c1), [c1, c2), ..., [ck, inf).
                complex_graph.y_binned = torch.tensor(np.logical_and(mads[sample] < self.mad_classification_cutoff + [math.inf],mads[sample] >= [0] + self.mad_classification_cutoff), dtype=torch.float).unsqueeze(0)
                complex_graph.y = torch.tensor(mads[sample] < self.mad_classification_cutoff[0]).unsqueeze(0).float()
            complex_graph.mad = torch.tensor(mads).float()
        # All graphs are served at diffusion time t = 0.
        complex_graph['ligand'].node_t = {'tr': 0 * torch.ones(complex_graph['ligand'].num_nodes)}
        complex_graph['receptor'].node_t = {'tr': 0 * torch.ones(complex_graph['receptor'].num_nodes)}
        if self.all_atoms:
            complex_graph['atom'].node_t = {'tr': 0 * torch.ones(complex_graph['atom'].num_nodes)}
        complex_graph.complex_t = {'tr': 0 * torch.ones(1)}
        return complex_graph

    def preprocessing(self, original_model_cache):
        """Generate water-position samples for every complex with the pre-trained score model.

        Runs `resample_steps` rounds of reverse diffusion per complex, concatenates
        the predicted positions, computes each prediction's distance to the nearest
        real water (MAD), and pickles positions/MADs/names into the cache directory.
        """
        log_data = []
        log_dir = "logs"
        os.makedirs(log_dir, exist_ok=True)
        t_to_sigma = partial(t_to_sigma_compl, args=self.original_model_args)
        # Load the frozen score model used to generate the samples.
        model = get_model(self.original_model_args, self.device, t_to_sigma=t_to_sigma, no_parallel=True)
        state_dict = torch.load(f'{self.original_model_dir}/{self.model_ckpt}', map_location=torch.device('cpu'))
        model.load_state_dict(state_dict, strict=True)
        model = model.to(self.device)
        model.eval()
        tr_schedule = get_t_schedule(inference_steps=self.inference_steps)
        print('Running mode: ', self.running_mode)
        # Both modes currently use the same settings; the branch exists to reject
        # anything other than "train"/"test".
        if self.running_mode == "train":
            water_ratio = self.water_ratio
            resample_steps = self.resample_steps
        elif self.running_mode == "test":
            water_ratio = self.water_ratio
            resample_steps = self.resample_steps
        else:
            raise ValueError("Invalid running mode!")
        total_resample_ratio = water_ratio * resample_steps
        print('common t schedule', tr_schedule)
        print('water_number/residue_number ratio: ', water_ratio)
        print('resampling steps: ', resample_steps)
        print('total resampling ratio: ', total_resample_ratio)
        mads, full_water_positions, names = [], [], []
        for idx, orig_complex_graph in tqdm(enumerate(self.loader)):
            pdb_name = orig_complex_graph[0]['name']
            start_time = time.time()
            data_list = [copy.deepcopy(orig_complex_graph) for _ in range(self.samples_per_complex)]
            # The number of waters to sample scales with the residue count.
            res_num = int(orig_complex_graph[0]['receptor'].pos.shape[0])
            step_num_water = int(res_num * water_ratio)
            total_num_water = int(res_num * total_resample_ratio)
            total_sampled_water = 0
            prediction_list = []
            confidence_list = []
            for i in range(resample_steps):
                sample_data_list = copy.deepcopy(data_list)
                # Place step_num_water waters at random positions, then denoise them.
                randomize_position_multiple(sample_data_list, False, self.original_model_args.tr_sigma_max, water_num=step_num_water)
                predictions, confidences = sampling(data_list=sample_data_list, model=model,
                                                    inference_steps=self.inference_steps,
                                                    tr_schedule=tr_schedule,
                                                    device=self.device, t_to_sigma=t_to_sigma, model_args=self.original_model_args,
                                                    save_visualization=self.save_visualization)
                prediction_list.append(predictions)
                confidence_list.append(confidences)
            # Recover the original (uncentered) coordinates for reference.
            orig_complex_graph['ligand'].orig_pos = (orig_complex_graph['ligand'].pos.cpu().numpy() + orig_complex_graph.original_center.cpu().numpy())
            orig_water_pos = np.expand_dims(orig_complex_graph['ligand'].orig_pos - orig_complex_graph.original_center.cpu().numpy(), axis=0)
            if isinstance(orig_complex_graph['ligand'].orig_pos, list):
                orig_complex_graph['ligand'].orig_pos = orig_complex_graph['ligand'].orig_pos[0]
            # Ground-truth water coordinates from the crystal structure.
            real_water_pos = find_real_water_pos(os.path.join(self.args.data_dir, f"{orig_complex_graph.name[0]}/{orig_complex_graph.name[0]}_water.pdb"))
            water_pos_list = []
            for complex_graphs in prediction_list:
                for complex_graph in complex_graphs:
                    water_pos_list.append(complex_graph['ligand'].pos.cpu().numpy())
            all_water_pos = np.concatenate(water_pos_list, axis=0)
            water_pos = np.asarray([all_water_pos], dtype=np.float32)
            # Move predictions back to the original frame before measuring distances.
            positions_new = water_pos.squeeze(0) + orig_complex_graph.original_center.cpu().numpy()
            mad, indices = get_nearest_point_distances(positions_new, real_water_pos)
            mads.append(mad)
            full_water_positions.append(water_pos)
            names.append(orig_complex_graph.name[0])
            end_time = time.time()
            processing_time = end_time - start_time
            log_data.append((pdb_name, res_num, f"{processing_time:.2f}"))
            assert(len(orig_complex_graph.name) == 1)
        # Persist results; filenames carry cache_creation_id when several caches are produced.
        with open(os.path.join(self.full_cache_path, f"water_positions{'' if self.cache_creation_id is None else '_id' + str(self.cache_creation_id)}.pkl"), 'wb') as f:
            pickle.dump((full_water_positions, mads), f)
        with open(os.path.join(self.full_cache_path, f"complex_names_in_same_order{'' if self.cache_creation_id is None else '_id' + str(self.cache_creation_id)}.pkl"), 'wb') as f:
            pickle.dump((names), f)
        with open(f"logs/processing_log_rr{total_resample_ratio}.txt", "w") as log_file:
            for record in log_data:
                log_file.write(f"{record[0]} {record[1]} {record[2]}\n")
import gc
import math
import os
import shutil
from argparse import Namespace, ArgumentParser, FileType
import torch.nn.functional as F
from functools import partial
import wandb
import torch
from sklearn.metrics import roc_auc_score
from torch_geometric.loader import DataListLoader, DataLoader
from tqdm import tqdm
from datasets.pdbbind import PDBBind, NoiseTransform
from confidence.dataset import ConfidenceDataset
from utils.training import AverageMeter
torch.multiprocessing.set_sharing_strategy('file_system')
import yaml
from utils.utils import save_yaml_file, get_optimizer_and_scheduler, get_model
from utils.diffusion_utils import t_to_sigma as t_to_sigma_compl
from confidence.dataset import get_args
from utils.parsing import parse_confidence_args
args = parse_confidence_args()
if args.config:
    # Merge values from the YAML config file into the parsed CLI arguments;
    # list-valued entries are appended to the existing CLI lists rather than
    # replacing them.
    config_dict = yaml.load(args.config, Loader=yaml.FullLoader)
    arg_dict = args.__dict__
    for key, value in config_dict.items():
        if isinstance(value, list):
            for v in value:
                arg_dict[key].append(v)
        else:
            arg_dict[key] = value
    # args.config was an open file handle; keep only its name for logging.
    args.config = args.config.name
# The checkpoint-selection logic below only understands these two goals.
assert(args.main_metric_goal == 'max' or args.main_metric_goal == 'min')
def sigmoid_function(target, scale=4):
    '''
    Squashed sigmoid that normalizes a non-negative distance onto [0, 1).

    Maps 0 -> 0 and large distances -> 1, so MAD regression targets are
    bounded regardless of how far a prediction lands.

    Parameters
    ----------
    target : torch.Tensor
        torch.Tensor with your targets, of length (N).
    scale : float
        Steepness of the sigmoid, default 4.
        (The original docstring claimed default 2, contradicting the signature.)

    Returns
    -------
    torch.Tensor of shape (N) set to [0,1] interval by the sigmoid function
    '''
    return (2/(1+torch.exp(-(scale*target)/torch.log(torch.tensor(2))))-1)**2
def train_epoch(model, loader, optimizer, mad_prediction):
    """Run one optimization epoch over `loader` and return the loss summary.

    mad_prediction=True trains a regressor on sigmoid-normalized MADs; otherwise
    a binary (or, with a list cutoff, binned) classifier. Relies on the
    module-level `device` and `args`. OOM batches are skipped with a warning.
    """
    model.train()
    meter = AverageMeter(['confidence_loss'])
    for data in tqdm(loader, total=len(loader)):
        if device.type == 'cuda' and len(data) % torch.cuda.device_count() == 1 or device.type == 'cpu' and data.num_graphs == 1:
            print("Skipping batch of size 1 since otherwise batchnorm would not work.")
            # BUG FIX: the original only printed the message and then trained on
            # the size-1 batch anyway; actually skip it so batchnorm cannot fail.
            continue
        optimizer.zero_grad()
        try:
            pred = model(data)
            if mad_prediction:
                # Regression against sigmoid-squashed MAD targets in [0, 1).
                labels = torch.cat([graph.mad for graph in data]).to(device) if isinstance(data, list) else data.mad
                norm_labels = sigmoid_function(labels)
                confidence_loss = F.mse_loss(pred, norm_labels)
            else:
                if isinstance(args.mad_classification_cutoff, list):
                    # Multi-bin classification over MAD intervals.
                    labels = torch.cat([graph.y_binned for graph in data]).to(device) if isinstance(data, list) else data.y_binned
                    confidence_loss = F.cross_entropy(pred, labels)
                else:
                    # Binary hit/miss classification.
                    labels = torch.cat([graph.y for graph in data]).to(device) if isinstance(data, list) else data.y
                    confidence_loss = F.binary_cross_entropy_with_logits(pred, labels)
            confidence_loss.backward()
            optimizer.step()
            meter.add([confidence_loss.cpu().detach()])
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                gc.collect()
                continue
            else:
                raise e
    return meter.summary()
def test_epoch(model, loader, mad_prediction):
    """Evaluate `model` over `loader` without gradient updates.

    Returns (summary_dict, baseline_metric) where baseline_metric is the mean
    absolute deviation from the label mean (regression) or the fraction of
    positive labels (classification). Relies on module-level `device`/`args`.
    """
    model.eval()
    meter = AverageMeter(['confidence_loss'], unpooled_metrics=True) if mad_prediction else AverageMeter(['confidence_loss', 'accuracy', 'ROC AUC'], unpooled_metrics=True)
    all_labels = []
    for data in tqdm(loader, total=len(loader)):
        try:
            with torch.no_grad():
                pred = model(data)
            # (removed: an unused `affinity_loss` zero tensor was created here)
            accuracy = torch.tensor(0.0, dtype=torch.float, device=pred[0].device)
            if mad_prediction:
                labels = torch.cat([graph.mad for graph in data]).to(device) if isinstance(data, list) else data.mad
                norm_labels = sigmoid_function(labels)
                confidence_loss = F.mse_loss(pred, norm_labels)
                meter.add([confidence_loss.cpu().detach()])
            else:
                # Default when ROC AUC is undefined (binned labels or a single class).
                roc_auc = 0
                if isinstance(args.mad_classification_cutoff, list):
                    labels = torch.cat([graph.y_binned for graph in data]).to(device) if isinstance(data, list) else data.y_binned
                    confidence_loss = F.cross_entropy(pred, labels)
                else:
                    labels = torch.cat([graph.y for graph in data]).to(device) if isinstance(data, list) else data.y
                    confidence_loss = F.binary_cross_entropy_with_logits(pred, labels)
                    accuracy = torch.mean((labels == (pred > 0).float()).float())
                    try:
                        roc_auc = roc_auc_score(labels.detach().cpu().numpy(), pred.detach().cpu().numpy())
                    except ValueError as e:
                        if 'Only one class present in y_true. ROC AUC score is not defined in that case.' in str(e):
                            roc_auc = 0
                        else:
                            raise e
                meter.add([confidence_loss.cpu().detach(), accuracy.cpu().detach(), torch.tensor(roc_auc)])
            all_labels.append(labels)
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory, skipping batch')
                for p in model.parameters():
                    if p.grad is not None:
                        del p.grad  # free some memory
                torch.cuda.empty_cache()
                continue
            else:
                raise e
    all_labels = torch.cat(all_labels)
    if mad_prediction:
        baseline_metric = ((all_labels - all_labels.mean()).abs()).mean()
    else:
        baseline_metric = all_labels.sum() / len(all_labels)
    # FIX: the original built a `results` dict, updated it with baseline_metric,
    # then discarded it and called meter.summary() a second time. Compute the
    # summary once and return it directly (same returned value as before).
    return meter.summary(), baseline_metric
def train(args, model, optimizer, scheduler, train_loader, val_loader, run_dir):
    """Main training loop: train/validate each epoch and checkpoint into run_dir.

    Saves 'best_model.pt' whenever args.main_metric improves in the direction of
    args.main_metric_goal, periodic snapshots per the save-frequency arguments,
    and 'last_model.pt' (with optimizer state) every epoch. Uses the module-level
    `device`, and wandb when args.wandb is set.
    """
    best_val_metric = math.inf if args.main_metric_goal == 'min' else 0
    best_epoch = 0
    print("Starting training...")
    for epoch in range(args.n_epochs):
        logs = {}
        train_metrics = train_epoch(model, train_loader, optimizer, args.mad_prediction)
        print("Epoch {}: Training loss {:.4f}".format(epoch, train_metrics['confidence_loss']))
        val_metrics, baseline_metric = test_epoch(model, val_loader, args.mad_prediction)
        if args.mad_prediction:
            print("Epoch {}: Validation loss {:.4f}".format(epoch, val_metrics['confidence_loss']))
        else:
            print("Epoch {}: Validation loss {:.4f} accuracy {:.4f}".format(epoch, val_metrics['confidence_loss'], val_metrics['accuracy']))
        if args.wandb:
            logs.update({'valinf_' + k: v for k, v in val_metrics.items()}, step=epoch + 1)
            logs.update({'train_' + k: v for k, v in train_metrics.items()}, step=epoch + 1)
            # baseline_metric: mean MAD deviation (regression) or positive fraction (classification).
            logs.update({'mean_mad' if args.mad_prediction else 'fraction_positives': baseline_metric,
                         'current_lr': optimizer.param_groups[0]['lr']})
            wandb.log(logs, step=epoch + 1)
        if scheduler:
            # Plateau-style scheduler steps on the monitored validation metric.
            scheduler.step(val_metrics[args.main_metric])
        # NOTE(review): model.module assumes a DataParallel-style wrapper on CUDA — confirm.
        state_dict = model.module.state_dict() if device.type == 'cuda' else model.state_dict()
        if args.main_metric_goal == 'min' and val_metrics[args.main_metric] < best_val_metric or \
                args.main_metric_goal == 'max' and val_metrics[args.main_metric] > best_val_metric:
            best_val_metric = val_metrics[args.main_metric]
            best_epoch = epoch
            torch.save(state_dict, os.path.join(run_dir, 'best_model.pt'))
        if args.model_save_frequency > 0 and (epoch + 1) % args.model_save_frequency == 0:
            torch.save(state_dict, os.path.join(run_dir, f'model_epoch{epoch+1}.pt'))
        if args.best_model_save_frequency > 0 and (epoch + 1) % args.best_model_save_frequency == 0:
            shutil.copyfile(os.path.join(run_dir, 'best_model.pt'), os.path.join(run_dir, f'best_model_epoch{epoch+1}.pt'))
        # Always keep a resumable snapshot of the latest state.
        torch.save({
            'epoch': epoch,
            'model': state_dict,
            'optimizer': optimizer.state_dict(),
        }, os.path.join(run_dir, 'last_model.pt'))
    print("Best Validation accuracy {} on Epoch {}".format(best_val_metric, best_epoch))
def construct_loader_origin(args_confidence, args, t_to_sigma):
    """Build PDBBind train/val/infer loaders used for sample generation.

    Unlike construct_loader_confidence's loaders, these always use the plain
    DataLoader (never DataListLoader), batch_size_preprocessing, and no
    shuffling, so cached samples stay aligned with complex order.
    """
    dataset_kwargs = dict(
        transform=None,
        root=args.data_dir,
        limit_complexes=args.limit_complexes,
        receptor_radius=args.receptor_radius,
        c_alpha_max_neighbors=args.c_alpha_max_neighbors,
        remove_hs=args.remove_hs,
        max_lig_size=args.max_lig_size,
        popsize=args.matching_popsize,
        maxiter=args.matching_maxiter,
        num_workers=args.num_workers,
        all_atoms=args.all_atoms,
        atom_radius=args.atom_radius,
        atom_max_neighbors=args.atom_max_neighbors,
        esm_embeddings_path=args.esm_embeddings_path,
    )
    train_dataset = PDBBind(cache_path=args.cache_path, split_path=args_confidence.split_train, keep_original=True,
                            num_conformers=args.num_conformers, **dataset_kwargs)
    val_dataset = PDBBind(cache_path=args.cache_path, split_path=args_confidence.split_val, keep_original=True, **dataset_kwargs)
    loader_kwargs = dict(batch_size=args_confidence.batch_size_preprocessing,
                         num_workers=args_confidence.num_workers,
                         shuffle=False,
                         pin_memory=args.pin_memory)
    train_loader = DataLoader(dataset=train_dataset, **loader_kwargs)
    val_loader = DataLoader(dataset=val_dataset, **loader_kwargs)
    # A second loader over the validation set, used purely for inference passes.
    infer_loader = DataLoader(dataset=val_dataset, **loader_kwargs)
    return train_loader, val_loader, infer_loader
def construct_loader_confidence(args, device):
    """Build the confidence-model train and validation loaders.

    Generated water positions are loaded from (or created into) the cache via
    ConfidenceDataset. If the train cache's combined ids are missing, we still
    attempt to build the validation dataset first, then re-raise the error.
    """
    common_args = {'cache_path': args.cache_path, 'original_model_dir': args.original_model_dir, 'device': device,
                   'inference_steps': args.inference_steps, 'samples_per_complex': args.samples_per_complex,
                   'limit_complexes': args.limit_complexes, 'all_atoms': args.all_atoms, 'balance': args.balance,
                   'mad_classification_cutoff': args.mad_classification_cutoff, 'use_original_model_cache': args.use_original_model_cache,
                   'cache_creation_id': args.cache_creation_id, "cache_ids_to_combine": args.cache_ids_to_combine,
                   "model_ckpt": args.ckpt,
                   "running_mode": args.running_mode,
                   "water_ratio": args.water_ratio,
                   "resample_steps": args.resample_steps}
    loader_class = DataListLoader if torch.cuda.is_available() else DataLoader
    # BUG FIX: hold the caught exception in a variable — Python 3 unbinds the
    # `except ... as e` name once the handler exits, so referencing `e` later
    # raised NameError instead of the real error.
    deferred_exception = None
    # construct original loader
    original_model_args = get_args(args.original_model_dir)
    t_to_sigma = partial(t_to_sigma_compl, args=original_model_args)
    train_loader, val_loader, infer_loader = construct_loader_origin(args, original_model_args, t_to_sigma)
    try:
        train_dataset = ConfidenceDataset(loader=train_loader, split=os.path.splitext(os.path.basename(args.split_train))[0], args=args, **common_args)
        train_loader = loader_class(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)
    except Exception as e:
        # BUG FIX: ConfidenceDataset raises "The generated water positions ...";
        # the original checked for "ligand positions" (stale copy), so this
        # fallback path could never trigger.
        if 'The generated water positions with cache_id do not exist:' in str(e):
            print("HAPPENING | Encountered the following exception when loading the confidence train dataset:")
            print(str(e))
            print("HAPPENING | We are still continuing because we want to try to generate the validation dataset if it has not been created yet:")
            deferred_exception = e
        else:
            raise e
    val_dataset = ConfidenceDataset(loader=val_loader, split=os.path.splitext(os.path.basename(args.split_val))[0], args=args, **common_args)
    val_loader = loader_class(dataset=val_dataset, batch_size=args.batch_size, shuffle=True)
    if deferred_exception is not None:
        raise Exception('We encountered the exception during train dataset loading: ', deferred_exception)
    return train_loader, val_loader
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Arguments the pre-trained score model was built with (needed for transfer).
    with open(f'{args.original_model_dir}/model_parameters.yml') as f:
        score_model_args = Namespace(**yaml.full_load(f))
    # construct loader
    train_loader, val_loader = construct_loader_confidence(args, device)
    model = get_model(score_model_args if args.transfer_weights else args, device, t_to_sigma=None, confidence_mode=True)
    optimizer, scheduler = get_optimizer_and_scheduler(args, model, scheduler_mode=args.main_metric_goal)
    if args.transfer_weights:
        print("HAPPENING | Transferring weights from original_model_dir to the new model after using original_model_dir's arguments to construct the new model.")
        checkpoint = torch.load(os.path.join(args.original_model_dir, args.ckpt), map_location=device)
        model_state_dict = model.state_dict()
        # Copy only the layers whose names exist in the new model.
        transfer_weights_dict = {k: v for k, v in checkpoint.items() if k in model_state_dict}
        model_state_dict.update(transfer_weights_dict)  # update the layers with the pretrained weights
        model.load_state_dict(model_state_dict)
    elif args.restart_dir:
        # FIX: renamed the checkpoint variable — the original bound it to `dict`,
        # shadowing the builtin.
        ckpt = torch.load(f'{args.restart_dir}/last_model.pt', map_location=torch.device('cpu'))
        # NOTE(review): loading via model.module presumes a DataParallel wrapper — confirm.
        model.module.load_state_dict(ckpt['model'], strict=True)
        optimizer.load_state_dict(ckpt['optimizer'])
        print("Restarting from epoch", ckpt['epoch'])
    numel = sum(p.numel() for p in model.parameters())
    print('Model with', numel, 'parameters')
    if args.wandb:
        wandb.init(
            entity='xiaohan-kuang',
            settings=wandb.Settings(start_method="fork"),
            project=args.project,
            name=args.run_name,
            config=args
        )
        wandb.log({'numel': numel})
    # record parameters
    run_dir = os.path.join(args.log_dir, args.run_name)
    yaml_file_name = os.path.join(run_dir, 'model_parameters.yml')
    save_yaml_file(yaml_file_name, args.__dict__)
    args.device = device
    train(args, model, optimizer, scheduler, train_loader, val_loader, run_dir)
import os
import shutil
import subprocess
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_file,
session,
Response,
)
import traceback
app = Flask(__name__, static_folder="static", template_folder="templates")
# NOTE(review): hard-coded session secret — fine for a local demo, but it must
# be replaced with a random value (e.g. from the environment) in any deployment.
app.secret_key = "SESSION_DUMMY_KEY"
@app.route("/")
def index():
return render_template("index.html")
@app.route("/download-demo/<filename>")
def download_file(filename):
return send_file(
os.path.join(app.static_folder, "files", filename), as_attachment=True
)
@app.route("/inference")
def inference():
error = session.pop("error", None)
output = session.pop("output", None)
inference_done = session.pop("inference_done", False)
return render_template(
"inference.html", error=error, output=output, inference_done=inference_done
)
@app.route("/upload_single", methods=["POST"])
def upload_single():
file = request.files.get("single_pdb")
if not file:
session["error"] = "File cannot be empty."
session["output"] = ""
session["inference_done"] = False
return redirect(url_for("inference"))
script_dir = os.path.dirname(os.path.abspath(__file__))
data_root = os.path.join(script_dir, "..", "data") # e.g. parent dir + data
upload_dir = os.path.join(data_root, "web_upload_data")
if os.path.exists(upload_dir):
for f in os.listdir(upload_dir):
os.remove(os.path.join(upload_dir, f))
else:
os.makedirs(upload_dir, exist_ok=True)
file_path = os.path.join(upload_dir, file.filename)
file.save(file_path)
cmd = [
"python",
os.path.join(script_dir, "..", "organize_pdb_dataset.py"),
"--raw_data",
"web_upload_data",
"--data_root",
"data",
"--output_dir",
"web_upload_data_organized",
"--splits_path",
"data/splits",
"--dummy_water_dir",
"data/dummy_water",
"--logs_dir",
"logs",
]
result = subprocess.run(
cmd,
capture_output=True,
text=True,
check=False,
cwd=os.path.join(script_dir, ".."),
)
if result.returncode != 0:
session["error"] = result.stderr
session["output"] = result.stdout
session["inference_done"] = False
else:
session["error"] = None
session["output"] = result.stdout
session["inference_done"] = False
return redirect(url_for("inference"))
@app.route("/upload_folder", methods=["POST"])
def upload_folder():
files = request.files.getlist("pdb_folder")
if not files or len(files) == 0:
session["error"] = "No folder or PDB files selected."
session["output"] = ""
session["inference_done"] = False
return redirect(url_for("inference"))
script_dir = os.path.dirname(os.path.abspath(__file__))
base_path = os.path.join(script_dir, "..", "data")
upload_dir = os.path.join(base_path, "web_upload_data")
if os.path.exists(upload_dir):
for f in os.listdir(upload_dir):
os.remove(os.path.join(upload_dir, f))
else:
os.makedirs(upload_dir, exist_ok=True)
saved_any_file = False
for file in files:
if file and file.filename.endswith(".pdb"):
file_path = os.path.join(upload_dir, os.path.basename(file.filename))
file.save(file_path)
saved_any_file = True
if not saved_any_file:
session["error"] = "No .pdb files found in selected folder."
session["output"] = ""
session["inference_done"] = False
return redirect(url_for("inference"))
cmd = [
"python",
os.path.join(script_dir, "..", "organize_pdb_dataset.py"),
"--raw_data",
"web_upload_data",
"--data_root",
"data",
"--output_dir",
"web_upload_data_organized",
"--splits_path",
"data/splits",
"--dummy_water_dir",
"data/dummy_water",
"--logs_dir",
"logs",
]
result = subprocess.run(
cmd,
capture_output=True,
text=True,
check=False,
cwd=os.path.join(script_dir, ".."),
)
if result.returncode != 0:
session["error"] = result.stderr
session["output"] = result.stdout
session["inference_done"] = False
else:
session["error"] = None
session["output"] = result.stdout
session["inference_done"] = False
return redirect(url_for("inference"))
@app.route("/start_inference", methods=["POST"])
def start_inference():
water_ratio = request.form.get("water_ratio", "1")
filter_threshold = request.form.get("filter_threshold", "0.1")
script_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.join(script_dir, "..")
esm_dir = os.path.join(script_dir, "..", "data")
data_dir = os.path.join(parent_dir, "data", "web_upload_data_organized")
if not os.path.isdir(data_dir) or not os.listdir(data_dir):
session[
"error"
] = "Please upload files first! (web_upload_data_organized is empty)"
session["output"] = ""
session["inference_done"] = False
return redirect(url_for("inference"))
# 1) ESM embeddings
cmd1 = [
"python",
os.path.join(
script_dir, "..", "datasets", "esm_embedding_preparation_water.py"
),
"--data_dir",
"data/web_upload_data_organized",
"--out_file",
"data/prepared_for_esm_web_upload_data_organized.fasta",
]
result1 = subprocess.run(
cmd1, capture_output=True, text=True, check=False, cwd=parent_dir
)
if result1.returncode != 0:
session["error"] = result1.stderr
session["output"] = result1.stdout
session["inference_done"] = False
return redirect(url_for("inference"))
# 2) ESM Feature
cmd2 = [
"python",
os.path.join(script_dir, "..", "esm", "scripts", "extract.py"),
"esm2_t33_650M_UR50D",
"prepared_for_esm_web_upload_data_organized.fasta",
"web_upload_data_organized_embeddings_output",
"--repr_layers",
"33",
"--include",
"per_tok",
"--truncation_seq_length",
"4096",
]
result2 = subprocess.run(
cmd2, capture_output=True, text=True, check=False, cwd=esm_dir
)
if result2.returncode != 0:
session["error"] = result2.stderr
session["output"] = result2.stdout
session["inference_done"] = False
return redirect(url_for("inference"))
pred_dir_name = f"web_prediction_rr{water_ratio}_cap{filter_threshold}"
cmd3 = [
"python",
"-m",
"inference_water_pos",
"--original_model_dir",
"workdir/all_atoms_score_model_res15_17092",
"--confidence_dir",
"workdir/confidence_model_17092_sigmoid_rr15",
"--data_dir",
"data/web_upload_data_organized",
"--ckpt",
"best_model.pt",
"--all_atoms",
"--cache_path",
"data/cache_confidence",
"--save_pos_path",
pred_dir_name,
"--split_test",
"data/splits/web_upload_data_organized.txt",
"--inference_steps",
"20",
"--esm_embeddings_path",
"data/web_upload_data_organized_embeddings_output",
"--cap",
filter_threshold,
"--running_mode",
"test",
"--mad_prediction",
"--save_pos",
"--water_ratio",
water_ratio,
]
result3 = subprocess.run(
cmd3, capture_output=True, text=True, check=False, cwd=parent_dir
)
if result3.returncode != 0:
session["error"] = result3.stderr
session["output"] = result3.stdout
session["inference_done"] = False
return redirect(url_for("inference"))
session["error"] = None
session["output"] = result3.stdout
session["inference_done"] = True
session["pred_dir"] = pred_dir_name
return redirect(url_for("inference"))
@app.route("/download_prediction")
def download_prediction():
script_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.join(script_dir, "..")
pred_dir_name = session.get("pred_dir", None)
if not pred_dir_name:
session["error"] = "No inference result found. Please run inference first."
session["output"] = ""
return redirect(url_for("inference"))
pred_dir = os.path.join(parent_dir, "inference_out", pred_dir_name)
if not os.path.isdir(pred_dir):
session[
"error"
] = f"Prediction folder {pred_dir_name} not found in inference_out/"
session["output"] = ""
return redirect(url_for("inference"))
zip_basename = os.path.join(parent_dir, "inference_out", pred_dir_name)
zip_path = zip_basename + ".zip"
if os.path.exists(zip_path):
os.remove(zip_path)
shutil.make_archive(base_name=zip_basename, format="zip", root_dir=pred_dir)
session["error"] = None
session[
"output"
] = "File is generated. Please click 'Cleanup' after your download if you wish to remove files."
session["inference_done"] = False
return send_file(zip_path, as_attachment=True)
@app.route("/cleanup_prediction")
def cleanup_prediction():
script_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.join(script_dir, "..")
# 1) data/splits/web_upload_data_organized.txt
try:
os.remove(
os.path.join(parent_dir, "data", "splits", "web_upload_data_organized.txt")
)
except:
pass
# 2) data/web_upload_data_organized
try:
shutil.rmtree(os.path.join(parent_dir, "data", "web_upload_data_organized"))
except:
pass
# 3) data/web_upload_data
try:
shutil.rmtree(os.path.join(parent_dir, "data", "web_upload_data"))
except:
pass
# 4) data/web_upload_data_organized_embeddings_output
try:
shutil.rmtree(
os.path.join(
parent_dir, "data", "web_upload_data_organized_embeddings_output"
)
)
except:
pass
# 5) data/prepared_for_esm_web_upload_data_organized.fasta
try:
os.remove(
os.path.join(
parent_dir, "data", "prepared_for_esm_web_upload_data_organized.fasta"
)
)
except:
pass
# 6) confidence cache
try:
shutil.rmtree(
os.path.join(
parent_dir,
"data",
"cache_confidence",
"model_all_atoms_score_model_res15_17092_split_web_upload_data_organized_limit_0",
)
)
except:
pass
# 7) all atom cache
try:
shutil.rmtree(
os.path.join(
parent_dir,
"data",
"cache_allatoms",
"limit0_INDEXweb_upload_data_organized_maxLigSizeNone_H0_recRad15.0_recMax24_atomRad5_atomMax8_esmEmbeddings",
)
)
except:
pass
# 8) remove inference_out/pred_dir if needed
pred_dir_name = session.get("pred_dir", None)
if pred_dir_name:
pred_dir = os.path.join(parent_dir, "inference_out", pred_dir_name)
try:
shutil.rmtree(pred_dir)
except:
pass
# remove the zip if it still exists
zip_path = pred_dir + ".zip"
try:
os.remove(zip_path)
except:
pass
session["error"] = None
session["output"] = "Cleanup done. All files are removed!"
return redirect(url_for("inference"))
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8891, debug=True)
from e3nn import o3
import torch
from torch import nn
from torch.nn import functional as F
from torch_cluster import radius, radius_graph
from torch_scatter import scatter_mean
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter('ignore')
from models.score_model import AtomEncoder, TensorProductConvLayer, GaussianSmearing
from utils import so3, torus
from datasets.process_mols import lig_feature_dims, rec_residue_feature_dims, rec_atom_feature_dims
from torch_scatter import scatter_mean
class CrossAttentionLayer(nn.Module):
    """One multi-head cross-attention step between two node sets.

    Nodes are laid out along the sequence axis with a singleton batch axis,
    matching ``nn.MultiheadAttention``'s default (L, N, E) layout: queries
    come from one node set, keys and values from the other.
    """

    def __init__(self, embed_dim, num_heads):
        super(CrossAttentionLayer, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim, num_heads)

    def forward(self, query_features, key_value_features):
        # (num_nodes, E) -> (num_nodes, 1, E): nodes along the sequence axis.
        q = query_features.unsqueeze(1)
        kv = key_value_features.unsqueeze(1)
        attended, _ = self.attention(q, kv, kv)
        # Drop the singleton batch axis: (num_query_nodes, embed_dim).
        return attended.squeeze(1)
class SelfAttentionLayer(nn.Module):
    """One multi-head self-attention step over a single set of node features.

    Every node attends to every other node: nodes are placed along the
    sequence axis with a singleton batch axis, per ``nn.MultiheadAttention``'s
    default (L, N, E) layout.
    """

    def __init__(self, embed_dim, num_heads):
        super(SelfAttentionLayer, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim, num_heads)

    def forward(self, features):
        # (num_nodes, E) -> (num_nodes, 1, E): one sequence, batch size 1.
        seq = features.unsqueeze(1)
        attended, _ = self.attention(seq, seq, seq)
        return attended.squeeze(1)
class TensorProductScoreModel(torch.nn.Module):
    """All-atom E(3)-equivariant score model over a heterogeneous graph.

    Node types: 'ligand' (water candidates), 'receptor' (residue-level nodes)
    and 'atom' (receptor atoms). Per-type features are embedded, then
    message-passed with e3nn tensor-product convolutions both within and
    across node types. In confidence mode forward() returns per-ligand-node
    confidence logits; otherwise it returns a per-ligand-node translational
    score vector (divided by sigma when ``scale_by_sigma``), the expanded
    sigmas, and the ligand batch vector.
    """
    def __init__(self, t_to_sigma, device, timestep_emb_func, in_lig_edge_features=4, sigma_embed_dim=32, sh_lmax=2,
                 ns=16, nv=4, num_conv_layers=2, lig_max_radius=5, rec_max_radius=30, cross_max_distance=250,
                 center_max_distance=30, distance_embed_dim=32, cross_distance_embed_dim=32, no_torsion=True,
                 scale_by_sigma=True, use_second_order_repr=False, batch_norm=True,
                 dynamic_max_cross=False, dropout=0.0, lm_embedding_type=False, confidence_mode=False,
                 confidence_dropout=0, confidence_no_batchnorm=False, num_confidence_outputs=1, recycle_output_size=0, mean_pool=True):
        # NOTE(review): mean_pool is accepted but never read anywhere in this class.
        super(TensorProductScoreModel, self).__init__()
        self.t_to_sigma = t_to_sigma
        self.in_lig_edge_features = in_lig_edge_features
        self.sigma_embed_dim = sigma_embed_dim
        self.lig_max_radius = lig_max_radius
        self.rec_max_radius = rec_max_radius
        self.cross_max_distance = cross_max_distance
        self.dynamic_max_cross = dynamic_max_cross
        self.center_max_distance = center_max_distance
        self.distance_embed_dim = distance_embed_dim
        self.cross_distance_embed_dim = cross_distance_embed_dim
        # Spherical-harmonic irreps used to embed all edge directions.
        self.sh_irreps = o3.Irreps.spherical_harmonics(lmax=sh_lmax)
        self.ns, self.nv = ns, nv
        self.scale_by_sigma = scale_by_sigma
        self.device = device
        self.no_torsion = no_torsion
        self.num_conv_layers = num_conv_layers
        self.timestep_emb_func = timestep_emb_func
        self.confidence_mode = confidence_mode
        # NOTE(review): constructed but never called in forward() — possibly legacy.
        self.lig_rec_cross_attention = CrossAttentionLayer(embed_dim=ns, num_heads=4)
        # Per-node-type feature encoders and per-edge-type scalar embeddings.
        self.lig_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=lig_feature_dims, sigma_embed_dim=sigma_embed_dim, additional_dim=recycle_output_size)
        self.lig_edge_embedding = nn.Sequential(nn.Linear(in_lig_edge_features + sigma_embed_dim + distance_embed_dim, ns),nn.ReLU(),nn.Dropout(dropout),nn.Linear(ns, ns))
        self.rec_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_residue_feature_dims, sigma_embed_dim=sigma_embed_dim, lm_embedding_type=lm_embedding_type)
        self.rec_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
        self.atom_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_atom_feature_dims, sigma_embed_dim=sigma_embed_dim)
        self.atom_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
        # Cross-type edge embeddings: ligand-receptor, atom-receptor, ligand-atom.
        self.lr_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
        self.ar_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
        self.la_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
        # Gaussian radial basis expansions for edge lengths.
        self.lig_distance_expansion = GaussianSmearing(0.0, lig_max_radius, distance_embed_dim)
        self.rec_distance_expansion = GaussianSmearing(0.0, rec_max_radius, distance_embed_dim)
        self.cross_distance_expansion = GaussianSmearing(0.0, cross_max_distance, cross_distance_embed_dim)
        if use_second_order_repr:
            irrep_seq = [
                f'{ns}x0e',
                f'{ns}x0e + {nv}x1o + {nv}x2e',
                f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o',
                f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o + {ns}x0o'
            ]
        else:
            irrep_seq = [
                f'{ns}x0e',
                f'{ns}x0e + {nv}x1o',
                f'{ns}x0e + {nv}x1o + {nv}x1e',
                f'{ns}x0e + {nv}x1o + {nv}x1e + {ns}x0o'
            ]
        # convolutional layers
        conv_layers = []
        for i in range(num_conv_layers):
            in_irreps = irrep_seq[min(i, len(irrep_seq) - 1)]
            out_irreps = irrep_seq[min(i + 1, len(irrep_seq) - 1)]
            parameters = {
                'in_irreps': in_irreps,
                'sh_irreps': self.sh_irreps,
                'out_irreps': out_irreps,
                'n_edge_features': 3 * ns,
                'residual': False,
                'batch_norm': batch_norm,
                'dropout': dropout
            }
            # NOTE(review): forward() only calls indices 9*l+1 .. 9*l+8; the
            # 9*l+0 layer is allocated but never used — confirm intended.
            for _ in range(9): # 3 intra & 6 inter per each layer
                conv_layers.append(TensorProductConvLayer(**parameters))
        self.conv_layers = nn.ModuleList(conv_layers)
        # confidence and affinity prediction layers
        if self.confidence_mode:
            output_confidence_dim = num_confidence_outputs
            self.confidence_predictor = nn.Sequential(
                nn.Linear(2 * self.ns if num_conv_layers >= 3 else self.ns, ns),
                nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(),
                nn.ReLU(),
                nn.Dropout(confidence_dropout),
                nn.Linear(ns, ns),
                nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(),
                nn.ReLU(),
                nn.Dropout(confidence_dropout),
                nn.Linear(ns, output_confidence_dim)
            )
        else:
            # convolution for translational and rotational scores
            self.center_distance_expansion = GaussianSmearing(0.0, center_max_distance, distance_embed_dim)
            self.center_edge_embedding = nn.Sequential(
                nn.Linear(distance_embed_dim + sigma_embed_dim, ns),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Linear(ns, ns)
            )
            self.final_conv = TensorProductConvLayer(
                in_irreps=self.conv_layers[-1].out_irreps,
                sh_irreps=self.sh_irreps,
                out_irreps=f'1x1o + 1x1e',
                n_edge_features=2 * ns,
                residual=False,
                dropout=dropout,
                batch_norm=batch_norm
            )
            # Rescales the magnitude of the predicted translation vector.
            self.tr_final_layer = nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1))
    def forward(self, data):
        """One score/confidence pass over a heterogeneous batch.

        Returns (tr_pred, expand_tr_sigma, ligand_batch) in score mode, or
        per-ligand-node confidence logits in confidence mode.
        """
        if not self.confidence_mode:
            tr_sigma = self.t_to_sigma(*[data.complex_t[noise_type] for noise_type in ['tr']])
        else:
            tr_sigma = data.complex_t['tr']
        # NOTE(review): ll_distance_cutoff is computed but never used below.
        ll_distance_cutoff = (tr_sigma * 3 + 20).unsqueeze(1)
        lig_node_attr = self.build_lig_node_attr(data)
        lig_node_attr = self.lig_node_embedding(lig_node_attr)
        # build receptor graph
        rec_node_attr, rec_edge_index, rec_edge_attr, rec_edge_sh = self.build_rec_conv_graph(data)
        rec_node_attr = self.rec_node_embedding(rec_node_attr)
        rec_edge_attr = self.rec_edge_embedding(rec_edge_attr)
        # build atom graph
        atom_node_attr, atom_edge_index, atom_edge_attr, atom_edge_sh = self.build_atom_conv_graph(data)
        atom_node_attr = self.atom_node_embedding(atom_node_attr)
        atom_edge_attr = self.atom_edge_embedding(atom_edge_attr)
        # build cross graph
        lr_cross_distance_cutoff = (tr_sigma * 3 + 20).unsqueeze(1) if self.dynamic_max_cross else self.cross_max_distance
        lr_edge_index, lr_edge_attr, lr_edge_sh, la_edge_index, la_edge_attr, \
        la_edge_sh, ar_edge_index, ar_edge_attr, ar_edge_sh = self.build_cross_conv_graph(data, lr_cross_distance_cutoff)
        lr_edge_attr= self.lr_edge_embedding(lr_edge_attr)
        la_edge_attr = self.la_edge_embedding(la_edge_attr)
        ar_edge_attr = self.ar_edge_embedding(ar_edge_attr)
        # Message passing: each round uses conv layers 9*l+1 .. 9*l+8.
        for l in range(self.num_conv_layers):
            # LIGAND updates (from receptor and atom neighbours)
            lr_edge_attr_ = torch.cat([lr_edge_attr, lig_node_attr[lr_edge_index[0], :self.ns], rec_node_attr[lr_edge_index[1], :self.ns]], -1)
            lr_update = self.conv_layers[9*l+1](rec_node_attr, lr_edge_index, lr_edge_attr_, lr_edge_sh,
                                                out_nodes=lig_node_attr.shape[0])
            la_edge_attr_ = torch.cat([la_edge_attr, lig_node_attr[la_edge_index[0], :self.ns], atom_node_attr[la_edge_index[1], :self.ns]], -1)
            la_update = self.conv_layers[9*l+2](atom_node_attr, la_edge_index, la_edge_attr_, la_edge_sh,
                                                out_nodes=lig_node_attr.shape[0])
            if l != self.num_conv_layers-1: # last layer optimisation
                # ATOM UPDATES
                atom_edge_attr_ = torch.cat([atom_edge_attr, atom_node_attr[atom_edge_index[0], :self.ns], atom_node_attr[atom_edge_index[1], :self.ns]], -1)
                atom_update = self.conv_layers[9*l+3](atom_node_attr, atom_edge_index, atom_edge_attr_, atom_edge_sh)
                al_edge_attr_ = torch.cat([la_edge_attr, atom_node_attr[la_edge_index[1], :self.ns], lig_node_attr[la_edge_index[0], :self.ns]], -1)
                al_update = self.conv_layers[9*l+4](lig_node_attr, torch.flip(la_edge_index, dims=[0]), al_edge_attr_,
                                                    la_edge_sh, out_nodes=atom_node_attr.shape[0])
                ar_edge_attr_ = torch.cat([ar_edge_attr, atom_node_attr[ar_edge_index[0], :self.ns], rec_node_attr[ar_edge_index[1], :self.ns]],-1)
                ar_update = self.conv_layers[9*l+5](rec_node_attr, ar_edge_index, ar_edge_attr_, ar_edge_sh, out_nodes=atom_node_attr.shape[0])
                # RECEPTOR updates
                rec_edge_attr_ = torch.cat([rec_edge_attr, rec_node_attr[rec_edge_index[0], :self.ns], rec_node_attr[rec_edge_index[1], :self.ns]], -1)
                rec_update = self.conv_layers[9*l+6](rec_node_attr, rec_edge_index, rec_edge_attr_, rec_edge_sh)
                rl_edge_attr_ = torch.cat([lr_edge_attr, rec_node_attr[lr_edge_index[1], :self.ns], lig_node_attr[lr_edge_index[0], :self.ns]], -1)
                rl_update = self.conv_layers[9*l+7](lig_node_attr, torch.flip(lr_edge_index, dims=[0]), rl_edge_attr_,
                                                    lr_edge_sh, out_nodes=rec_node_attr.shape[0])
                ra_edge_attr_ = torch.cat([ar_edge_attr, rec_node_attr[ar_edge_index[1], :self.ns], atom_node_attr[ar_edge_index[0], :self.ns]], -1)
                ra_update = self.conv_layers[9*l+8](atom_node_attr, torch.flip(ar_edge_index, dims=[0]), ra_edge_attr_,
                                                    ar_edge_sh, out_nodes=rec_node_attr.shape[0])
            # padding original features and update features with residual updates
            lig_node_attr = F.pad(lig_node_attr, (0, la_update.shape[-1] - lig_node_attr.shape[-1]))
            lig_node_attr = lig_node_attr + la_update + lr_update
            if l != self.num_conv_layers - 1: # last layer optimisation
                # NOTE(review): the pad width uses rec_node_attr.shape[-1]; atom and
                # receptor feature widths stay equal through these rounds so the value
                # is the same, but atom_node_attr.shape[-1] looks like the intended
                # operand — confirm.
                atom_node_attr = F.pad(atom_node_attr, (0, atom_update.shape[-1] - rec_node_attr.shape[-1]))
                atom_node_attr = atom_node_attr + atom_update + al_update + ar_update
                rec_node_attr = F.pad(rec_node_attr, (0, rec_update.shape[-1] - rec_node_attr.shape[-1]))
                rec_node_attr = rec_node_attr + rec_update + ra_update + rl_update
        # confidence and affinity prediction
        if self.confidence_mode:
            # Concatenate the even (0e) and odd (0o) scalar channels when deep enough.
            scalar_lig_attr = torch.cat([lig_node_attr[:,:self.ns],lig_node_attr[:,-self.ns:]], dim=1) if self.num_conv_layers >= 3 else lig_node_attr[:,:self.ns]
            confidence = self.confidence_predictor(scalar_lig_attr).squeeze(dim=-1)
            return confidence
        # Final conv from each ligand node to its graph's center produces the
        # translational score (one 1o and one 1e vector per node, summed below).
        center_edge_index, center_edge_attr, center_edge_sh = self.build_center_conv_graph(data)
        center_edge_attr = self.center_edge_embedding(center_edge_attr)
        center_edge_attr = torch.cat([center_edge_attr, lig_node_attr[center_edge_index[1], :self.ns]], -1)
        global_pred = self.final_conv(lig_node_attr, center_edge_index, center_edge_attr, center_edge_sh, out_nodes=lig_node_attr.shape[0], scatter_arange = True)
        if torch.isnan(global_pred).any():
            print("NaN found in global_pred")
            global_pred = torch.nan_to_num(global_pred, nan=0.0)
        tr_pred = global_pred[:, :3] + global_pred[:, 3:6]
        data.graph_sigma_emb = self.timestep_emb_func(data.complex_t['tr'])
        # Renormalize: keep direction, predict magnitude from norm + sigma embedding.
        tr_norm = torch.linalg.vector_norm(tr_pred, dim=1).unsqueeze(1)
        expand_sigma_emb = torch.index_select(data.graph_sigma_emb, dim=0, index=data['ligand'].batch)
        tr_pred = tr_pred / tr_norm * self.tr_final_layer(torch.cat([tr_norm, expand_sigma_emb], dim=1))
        expand_tr_sigma = torch.index_select(tr_sigma, dim=0, index=data['ligand'].batch)
        if self.scale_by_sigma:
            tr_pred = tr_pred / expand_tr_sigma.unsqueeze(1)
        return tr_pred, expand_tr_sigma, data['ligand'].batch
    def build_lig_node_attr(self, data):
        """Concatenate ligand node features with their sigma (time) embedding."""
        data['ligand'].node_sigma_emb = self.timestep_emb_func(data['ligand'].node_t['tr'])
        node_attr = torch.cat([data['ligand'].x, data['ligand'].node_sigma_emb], 1)
        return node_attr
    def build_rec_conv_graph(self, data):
        """Build node/edge attributes and edge SH for the residue-level receptor graph."""
        data['receptor'].node_sigma_emb = self.timestep_emb_func(data['receptor'].node_t['tr'])
        node_attr = torch.cat([data['receptor'].x, data['receptor'].node_sigma_emb], 1)
        edge_index = data['receptor', 'receptor'].edge_index
        src, dst = edge_index
        edge_vec = data['receptor'].pos[dst.long()] - data['receptor'].pos[src.long()]
        edge_length_emb = self.rec_distance_expansion(edge_vec.norm(dim=-1))
        edge_sigma_emb = data['receptor'].node_sigma_emb[edge_index[0].long()]
        edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1)
        edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
        return node_attr, edge_index, edge_attr, edge_sh
    def build_atom_conv_graph(self, data):
        # build the graph between receptor atoms
        data['atom'].node_sigma_emb = self.timestep_emb_func(data['atom'].node_t['tr'])
        node_attr = torch.cat([data['atom'].x, data['atom'].node_sigma_emb], 1)
        edge_index = data['atom', 'atom'].edge_index
        src, dst = edge_index
        edge_vec = data['atom'].pos[dst.long()] - data['atom'].pos[src.long()]
        # Atom-atom distances use the short-range (ligand-radius) expansion.
        edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1))
        edge_sigma_emb = data['atom'].node_sigma_emb[edge_index[0].long()]
        edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1)
        edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
        return node_attr, edge_index, edge_attr, edge_sh
    def build_cross_conv_graph(self, data, lr_cross_distance_cutoff):
        """Build the three cross-type edge sets: ligand-receptor, ligand-atom, atom-receptor."""
        data['ligand'].pos = data['ligand'].pos.float()
        if torch.is_tensor(lr_cross_distance_cutoff):
            # different cutoff for every graph
            lr_edge_index = radius(data['receptor'].pos / lr_cross_distance_cutoff[data['receptor'].batch],
                                   data['ligand'].pos / lr_cross_distance_cutoff[data['ligand'].batch], 1,
                                   data['receptor'].batch, data['ligand'].batch, max_num_neighbors=10000)
        else:
            lr_edge_index = radius(data['receptor'].pos, data['ligand'].pos, lr_cross_distance_cutoff,
                                   data['receptor'].batch, data['ligand'].batch, max_num_neighbors=10000)
        lr_edge_vec = data['receptor'].pos[lr_edge_index[1].long()] - data['ligand'].pos[lr_edge_index[0].long()]
        lr_edge_length_emb = self.cross_distance_expansion(lr_edge_vec.norm(dim=-1))
        lr_edge_sigma_emb = data['ligand'].node_sigma_emb[lr_edge_index[0].long()]
        lr_edge_attr = torch.cat([lr_edge_sigma_emb, lr_edge_length_emb], 1)
        lr_edge_sh = o3.spherical_harmonics(self.sh_irreps, lr_edge_vec, normalize=True, normalization='component')
        # NOTE(review): cutoff_d is computed but never used below.
        cutoff_d = lr_cross_distance_cutoff[data['ligand'].batch[lr_edge_index[0]]].squeeze() \
            if torch.is_tensor(lr_cross_distance_cutoff) else lr_cross_distance_cutoff
        la_edge_index = radius(data['atom'].pos, data['ligand'].pos, self.lig_max_radius,
                               data['atom'].batch, data['ligand'].batch, max_num_neighbors=10000)
        la_edge_vec = data['atom'].pos[la_edge_index[1].long()] - data['ligand'].pos[la_edge_index[0].long()]
        la_edge_length_emb = self.cross_distance_expansion(la_edge_vec.norm(dim=-1))
        la_edge_sigma_emb = data['ligand'].node_sigma_emb[la_edge_index[0].long()]
        la_edge_attr = torch.cat([la_edge_sigma_emb, la_edge_length_emb], 1)
        la_edge_sh = o3.spherical_harmonics(self.sh_irreps, la_edge_vec, normalize=True, normalization='component')
        ar_edge_index = data['atom', 'receptor'].edge_index
        # Guard against stale/out-of-range indices in the precomputed edge list.
        valid_indices = (ar_edge_index[1] < data['receptor'].pos.size(0)) & (ar_edge_index[0] < data['atom'].pos.size(0))
        ar_edge_index = ar_edge_index[:, valid_indices]
        ar_edge_vec = data['receptor'].pos[ar_edge_index[1].long()] - data['atom'].pos[ar_edge_index[0].long()]
        ar_edge_length_emb = self.rec_distance_expansion(ar_edge_vec.norm(dim=-1))
        ar_edge_sigma_emb = data['atom'].node_sigma_emb[ar_edge_index[0].long()]
        ar_edge_attr = torch.cat([ar_edge_sigma_emb, ar_edge_length_emb], 1)
        ar_edge_sh = o3.spherical_harmonics(self.sh_irreps, ar_edge_vec, normalize=True, normalization='component')
        return lr_edge_index, lr_edge_attr, lr_edge_sh, la_edge_index, la_edge_attr, \
               la_edge_sh, ar_edge_index, ar_edge_attr, ar_edge_sh
    def build_center_conv_graph(self, data):
        """Build one edge per ligand node pointing from its graph's center of mass."""
        edge_index = torch.cat([data['ligand'].batch.unsqueeze(0), torch.arange(len(data['ligand'].batch)).to(data['ligand'].x.device).unsqueeze(0)], dim=0)
        # NOTE(review): `count` is allocated but never used (bincount is used instead).
        center_pos, count = torch.zeros((data.num_graphs, 3)).to(data['ligand'].x.device), torch.zeros((data.num_graphs, 3)).to(data['ligand'].x.device)
        center_pos.index_add_(0, index=data['ligand'].batch, source=data['ligand'].pos)
        center_pos = center_pos / torch.bincount(data['ligand'].batch).unsqueeze(1)
        edge_vec = data['ligand'].pos[edge_index[1]] - center_pos[edge_index[0]]
        edge_attr = self.center_distance_expansion(edge_vec.norm(dim=-1))
        edge_sigma_emb = data['ligand'].node_sigma_emb[edge_index[1].long()]
        edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1)
        edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
        return edge_index, edge_attr, edge_sh
    def build_bond_conv_graph(self, data):
        # NOTE(review): references self.final_edge_embedding, which is never defined
        # in this class (torsion components were removed) — this method appears to be
        # unused legacy code and would raise AttributeError if called.
        bonds = data['ligand', 'ligand'].edge_index[:, data['ligand'].edge_mask].long()
        bond_pos = (data['ligand'].pos[bonds[0]] + data['ligand'].pos[bonds[1]]) / 2
        bond_batch = data['ligand'].batch[bonds[0]]
        edge_index = radius(data['ligand'].pos, bond_pos, self.lig_max_radius, batch_x=data['ligand'].batch, batch_y=bond_batch)
        edge_vec = data['ligand'].pos[edge_index[1]] - bond_pos[edge_index[0]]
        edge_attr = self.lig_distance_expansion(edge_vec.norm(dim=-1))
        edge_attr = self.final_edge_embedding(edge_attr)
        edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
        return bonds, edge_index, edge_attr, edge_sh
class RecycleNet(torch.nn.Module):
    """Runs TensorProductScoreModel for a fixed number of recycling cycles.

    Each cycle's per-ligand-node prediction is written into the last
    ``recycle_output_size`` columns of the ligand node features, so the next
    cycle can condition on the previous prediction. Returns the final cycle's
    (prediction, expanded sigmas, ligand batch) tuple.
    """
    def __init__(self, t_to_sigma, device, timestep_emb_func, in_lig_edge_features=4, sigma_embed_dim=32, sh_lmax=2,
                 ns=16, nv=4, num_conv_layers=2, lig_max_radius=5, rec_max_radius=30, cross_max_distance=250,
                 center_max_distance=30, distance_embed_dim=32, cross_distance_embed_dim=32, no_torsion=True,
                 scale_by_sigma=True, use_second_order_repr=False, batch_norm=True,
                 dynamic_max_cross=False, dropout=0.0, lm_embedding_type=False, confidence_mode=False,
                 confidence_dropout=0, confidence_no_batchnorm=False, num_confidence_outputs=1, recycle_output_size=3):
        super(RecycleNet, self).__init__()
        # Number of ligand-feature columns reserved for the recycled prediction.
        self.recycle_output_size = recycle_output_size
        # Fixed number of recycling iterations.
        self.N_cycle = 3
        self.score_model = TensorProductScoreModel(t_to_sigma, device, timestep_emb_func, in_lig_edge_features, sigma_embed_dim, sh_lmax,
                                                   ns, nv, num_conv_layers, lig_max_radius, rec_max_radius, cross_max_distance,
                                                   center_max_distance, distance_embed_dim, cross_distance_embed_dim, no_torsion,
                                                   scale_by_sigma, use_second_order_repr, batch_norm,
                                                   dynamic_max_cross, dropout, lm_embedding_type, confidence_mode,
                                                   confidence_dropout, confidence_no_batchnorm, num_confidence_outputs, recycle_output_size)
    def forward(self, data):
        """Append a zeroed recycled-feature slot to ligand x, then recycle N_cycle times."""
        training_mode = data[0]['training_mode']
        device = data['ligand'].x.device
        recycle_output = torch.zeros(data['ligand'].x.shape[0], self.recycle_output_size).to(device)
        data['ligand'].x = torch.cat((data['ligand'].x, recycle_output), dim=1)
        for recyc in range(self.N_cycle):
            if training_mode:
                # NOTE(review): this detaches the previous cycle's tensor, but the
                # name is rebound by the score-model call on the next line; state
                # actually flows between cycles through the write into
                # data['ligand'].x below — confirm the intended gradient behaviour.
                recycle_output = recycle_output.detach()
            recycle_output, expand_tr_sigma, lig_batch = self.score_model(data)
            # Feed this cycle's prediction into the reserved feature columns.
            data['ligand'].x[:, -self.recycle_output_size:] = recycle_output
        return recycle_output, expand_tr_sigma, lig_batch
3D | kuangxh9/SuperWater | models/score_model.py | .py | 19,780 | 391 | import math
from e3nn import o3
import torch
from torch import nn
from torch.nn import functional as F
from torch_cluster import radius, radius_graph
from torch_scatter import scatter, scatter_mean
import numpy as np
from e3nn.nn import BatchNorm
from utils import so3, torus
from datasets.process_mols import lig_feature_dims, rec_residue_feature_dims
class AtomEncoder(torch.nn.Module):
    """Embed mixed categorical + scalar (+ optional LM) node features into one vector.

    Input columns are laid out as: one column per categorical feature (looked
    up in per-feature embedding tables), then the scalar features (projected
    by a single linear layer), optionally followed by language-model embedding
    columns that are fused in by one more linear layer. All contributions are
    summed into a single ``emb_dim``-wide embedding.
    """

    def __init__(self, emb_dim, feature_dims, sigma_embed_dim, additional_dim=0, lm_embedding_type=None):
        # feature_dims: (list of categorical cardinalities, number of scalar features)
        super(AtomEncoder, self).__init__()
        self.atom_embedding_list = torch.nn.ModuleList()
        self.num_categorical_features = len(feature_dims[0])
        self.num_scalar_features = feature_dims[1] + sigma_embed_dim + additional_dim
        self.lm_embedding_type = lm_embedding_type
        for cardinality in feature_dims[0]:
            table = torch.nn.Embedding(cardinality, emb_dim)
            torch.nn.init.xavier_uniform_(table.weight.data)
            self.atom_embedding_list.append(table)
        if self.num_scalar_features > 0:
            self.linear = torch.nn.Linear(self.num_scalar_features, emb_dim)
        if self.lm_embedding_type is not None:
            if self.lm_embedding_type == 'esm':
                self.lm_embedding_dim = 1280
            else:
                raise ValueError('LM Embedding type was not correctly determined. LM embedding type: ', self.lm_embedding_type)
            self.lm_embedding_layer = torch.nn.Linear(self.lm_embedding_dim + emb_dim, emb_dim)

    def forward(self, x):
        # Check the expected column layout before slicing.
        expected_cols = self.num_categorical_features + self.num_scalar_features
        if self.lm_embedding_type is not None:
            assert x.shape[1] == expected_cols + self.lm_embedding_dim
        else:
            assert x.shape[1] == expected_cols
        embedding = 0
        # Sum the embedding-table lookups over the categorical columns.
        for col in range(self.num_categorical_features):
            embedding = embedding + self.atom_embedding_list[col](x[:, col].long())
        if self.num_scalar_features > 0:
            scalars = x[:, self.num_categorical_features:self.num_categorical_features + self.num_scalar_features]
            embedding = embedding + self.linear(scalars)
        if self.lm_embedding_type is not None:
            # Fuse the trailing LM-embedding columns with the summed embedding.
            embedding = self.lm_embedding_layer(torch.cat([embedding, x[:, -self.lm_embedding_dim:]], axis=1))
        return embedding
class TensorProductConvLayer(torch.nn.Module):
    """Equivariant graph convolution built on an e3nn tensor product.

    Each edge forms a message by a fully connected tensor product between the
    destination node's irreps features and the edge's spherical harmonics; the
    tensor-product weights are predicted per edge from scalar edge features by
    a small MLP. Messages are aggregated onto source nodes with a scatter,
    optionally followed by a residual connection and equivariant batch norm.
    """

    def __init__(self, in_irreps, sh_irreps, out_irreps, n_edge_features, residual=True, batch_norm=True, dropout=0.0,
                 hidden_features=None):
        super(TensorProductConvLayer, self).__init__()
        self.in_irreps = in_irreps
        self.out_irreps = out_irreps
        self.sh_irreps = sh_irreps
        self.residual = residual
        hidden_features = n_edge_features if hidden_features is None else hidden_features
        # Tensor product whose weights are supplied externally, per edge.
        tp = o3.FullyConnectedTensorProduct(in_irreps, sh_irreps, out_irreps, shared_weights=False)
        self.tp = tp
        # MLP mapping scalar edge features to the flat tensor-product weight vector.
        self.fc = nn.Sequential(
            nn.Linear(n_edge_features, hidden_features),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_features, tp.weight_numel)
        )
        self.batch_norm = BatchNorm(out_irreps) if batch_norm else None

    def forward(self, node_attr, edge_index, edge_attr, edge_sh, out_nodes=None, reduce='mean', scatter_arange=False):
        src_nodes, dst_nodes = edge_index
        # Per-edge message: TP of destination features with edge SH, weighted per edge.
        messages = self.tp(node_attr[dst_nodes], edge_sh, self.fc(edge_attr))
        target_count = out_nodes or node_attr.shape[0]
        if scatter_arange:
            # Return the raw per-edge messages without aggregation.
            out = messages
        else:
            out = scatter(messages, src_nodes, dim=0, dim_size=target_count, reduce=reduce)
        if self.residual:
            # Zero-pad the input on the channel axis so it can be added to the output.
            out = out + F.pad(node_attr, (0, out.shape[-1] - node_attr.shape[-1]))
        if self.batch_norm:
            out = self.batch_norm(out)
        return out
class TensorProductScoreModel(torch.nn.Module):
def __init__(self, t_to_sigma, device, timestep_emb_func, in_lig_edge_features=4, sigma_embed_dim=32, sh_lmax=2,
ns=16, nv=4, num_conv_layers=2, lig_max_radius=5, rec_max_radius=30, cross_max_distance=250,
center_max_distance=30, distance_embed_dim=32, cross_distance_embed_dim=32, no_torsion=False,
scale_by_sigma=True, use_second_order_repr=False, batch_norm=True,
dynamic_max_cross=False, dropout=0.0, lm_embedding_type=None, confidence_mode=False,
confidence_dropout=0, confidence_no_batchnorm=False, num_confidence_outputs=1):
super(TensorProductScoreModel, self).__init__()
self.t_to_sigma = t_to_sigma
self.in_lig_edge_features = in_lig_edge_features
self.sigma_embed_dim = sigma_embed_dim
self.lig_max_radius = lig_max_radius
self.rec_max_radius = rec_max_radius
self.cross_max_distance = cross_max_distance
self.dynamic_max_cross = dynamic_max_cross
self.center_max_distance = center_max_distance
self.distance_embed_dim = distance_embed_dim
self.cross_distance_embed_dim = cross_distance_embed_dim
self.sh_irreps = o3.Irreps.spherical_harmonics(lmax=sh_lmax)
self.ns, self.nv = ns, nv
self.scale_by_sigma = scale_by_sigma
self.device = device
self.no_torsion = no_torsion
self.timestep_emb_func = timestep_emb_func
self.confidence_mode = confidence_mode
self.num_conv_layers = num_conv_layers
self.lig_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=lig_feature_dims, sigma_embed_dim=sigma_embed_dim)
self.lig_edge_embedding = nn.Sequential(nn.Linear(in_lig_edge_features + sigma_embed_dim + distance_embed_dim, ns),nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
self.rec_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_residue_feature_dims, sigma_embed_dim=sigma_embed_dim, lm_embedding_type=lm_embedding_type)
self.rec_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
self.cross_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
self.lig_distance_expansion = GaussianSmearing(0.0, lig_max_radius, distance_embed_dim)
self.rec_distance_expansion = GaussianSmearing(0.0, rec_max_radius, distance_embed_dim)
self.cross_distance_expansion = GaussianSmearing(0.0, cross_max_distance, cross_distance_embed_dim)
if use_second_order_repr:
irrep_seq = [
f'{ns}x0e',
f'{ns}x0e + {nv}x1o + {nv}x2e',
f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o',
f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o + {ns}x0o'
]
else:
irrep_seq = [
f'{ns}x0e',
f'{ns}x0e + {nv}x1o',
f'{ns}x0e + {nv}x1o + {nv}x1e',
f'{ns}x0e + {nv}x1o + {nv}x1e + {ns}x0o'
]
lig_conv_layers, rec_conv_layers, lig_to_rec_conv_layers, rec_to_lig_conv_layers = [], [], [], []
for i in range(num_conv_layers):
in_irreps = irrep_seq[min(i, len(irrep_seq) - 1)]
out_irreps = irrep_seq[min(i + 1, len(irrep_seq) - 1)]
parameters = {
'in_irreps': in_irreps,
'sh_irreps': self.sh_irreps,
'out_irreps': out_irreps,
'n_edge_features': 3 * ns,
'hidden_features': 3 * ns,
'residual': False,
'batch_norm': batch_norm,
'dropout': dropout
}
lig_layer = TensorProductConvLayer(**parameters)
lig_conv_layers.append(lig_layer)
rec_layer = TensorProductConvLayer(**parameters)
rec_conv_layers.append(rec_layer)
lig_to_rec_layer = TensorProductConvLayer(**parameters)
lig_to_rec_conv_layers.append(lig_to_rec_layer)
rec_to_lig_layer = TensorProductConvLayer(**parameters)
rec_to_lig_conv_layers.append(rec_to_lig_layer)
self.lig_conv_layers = nn.ModuleList(lig_conv_layers)
self.rec_conv_layers = nn.ModuleList(rec_conv_layers)
self.lig_to_rec_conv_layers = nn.ModuleList(lig_to_rec_conv_layers)
self.rec_to_lig_conv_layers = nn.ModuleList(rec_to_lig_conv_layers)
if self.confidence_mode:
self.confidence_predictor = nn.Sequential(
nn.Linear(2*self.ns if num_conv_layers >= 3 else self.ns,ns),
nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(),
nn.ReLU(),
nn.Dropout(confidence_dropout),
nn.Linear(ns, ns),
nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(),
nn.ReLU(),
nn.Dropout(confidence_dropout),
nn.Linear(ns, num_confidence_outputs)
)
else:
# center of mass translation and rotation components
self.center_distance_expansion = GaussianSmearing(0.0, center_max_distance, distance_embed_dim)
self.center_edge_embedding = nn.Sequential(
nn.Linear(distance_embed_dim + sigma_embed_dim, ns),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(ns, ns)
)
self.final_conv = TensorProductConvLayer(
in_irreps=self.lig_conv_layers[-1].out_irreps,
sh_irreps=self.sh_irreps,
out_irreps=f'2x1o + 2x1e',
n_edge_features=2 * ns,
residual=False,
dropout=dropout,
batch_norm=batch_norm
)
self.tr_final_layer = nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns),nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 3))
if not no_torsion:
# torsion angles components
self.final_edge_embedding = nn.Sequential(
nn.Linear(distance_embed_dim, ns),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(ns, ns)
)
self.final_tp_tor = o3.FullTensorProduct(self.sh_irreps, "2e")
self.tor_bond_conv = TensorProductConvLayer(
in_irreps=self.lig_conv_layers[-1].out_irreps,
sh_irreps=self.final_tp_tor.irreps_out,
out_irreps=f'{ns}x0o + {ns}x0e',
n_edge_features=3 * ns,
residual=False,
dropout=dropout,
batch_norm=batch_norm
)
self.tor_final_layer = nn.Sequential(
nn.Linear(2 * ns, ns, bias=False),
nn.Tanh(),
nn.Dropout(dropout),
nn.Linear(ns, 1, bias=False)
)
def forward(self, data):
if not self.confidence_mode:
tr_sigma = self.t_to_sigma(*[data.complex_t[noise_type] for noise_type in ['tr']])
else:
tr_sigma = [data.complex_t[noise_type] for noise_type in ['tr']]
# build ligand node attr (emb + feature)
lig_node_attr = self.build_lig_node_attr(data)
lig_node_attr = self.lig_node_embedding(lig_node_attr)
# build receptor graph
rec_node_attr, rec_edge_index, rec_edge_attr, rec_edge_sh = self.build_rec_conv_graph(data)
rec_src, rec_dst = rec_edge_index
rec_node_attr = self.rec_node_embedding(rec_node_attr)
rec_edge_attr = self.rec_edge_embedding(rec_edge_attr)
# build cross graph
if self.dynamic_max_cross:
cross_cutoff = (tr_sigma * 3 + 20).unsqueeze(1)
else:
cross_cutoff = self.cross_max_distance
cross_edge_index, cross_edge_attr, cross_edge_sh = self.build_cross_conv_graph(data, cross_cutoff)
cross_lig, cross_rec = cross_edge_index
cross_edge_attr = self.cross_edge_embedding(cross_edge_attr)
for l in range(len(self.lig_conv_layers)):
rec_to_lig_edge_attr_ = torch.cat([cross_edge_attr, lig_node_attr[cross_lig, :self.ns], rec_node_attr[cross_rec, :self.ns]], -1)
lig_inter_update = self.rec_to_lig_conv_layers[l](rec_node_attr, cross_edge_index, rec_to_lig_edge_attr_, cross_edge_sh,
out_nodes=lig_node_attr.shape[0])
if l != len(self.lig_conv_layers) - 1:
rec_edge_attr_ = torch.cat([rec_edge_attr, rec_node_attr[rec_src, :self.ns], rec_node_attr[rec_dst, :self.ns]], -1)
rec_intra_update = self.rec_conv_layers[l](rec_node_attr, rec_edge_index, rec_edge_attr_, rec_edge_sh)
lig_to_rec_edge_attr_ = torch.cat([cross_edge_attr, lig_node_attr[cross_lig, :self.ns], rec_node_attr[cross_rec, :self.ns]], -1)
rec_inter_update = self.lig_to_rec_conv_layers[l](lig_node_attr, torch.flip(cross_edge_index, dims=[0]), lig_to_rec_edge_attr_,
cross_edge_sh, out_nodes=rec_node_attr.shape[0])
lig_node_attr = F.pad(lig_node_attr, (0, lig_inter_update.shape[-1] - lig_node_attr.shape[-1]))
lig_node_attr = lig_node_attr + lig_inter_update
if l != len(self.lig_conv_layers) - 1:
rec_node_attr = F.pad(rec_node_attr, (0, rec_intra_update.shape[-1] - rec_node_attr.shape[-1]))
rec_node_attr = rec_node_attr + rec_intra_update + rec_inter_update
# compute confidence score
if self.confidence_mode:
scalar_lig_attr = torch.cat([lig_node_attr[:,:self.ns],lig_node_attr[:,-self.ns:] ], dim=1) if self.num_conv_layers >= 3 else lig_node_attr[:,:self.ns]
confidence = self.confidence_predictor(scatter_mean(scalar_lig_attr, data['ligand'].batch, dim=0)).squeeze(dim=-1)
return confidence
# compute translational score vectors
center_edge_index, center_edge_attr, center_edge_sh = self.build_center_conv_graph(data)
center_edge_attr = self.center_edge_embedding(center_edge_attr)
center_edge_attr = torch.cat([center_edge_attr, lig_node_attr[center_edge_index[1], :self.ns]], -1)
global_pred = self.final_conv(lig_node_attr, center_edge_index, center_edge_attr, center_edge_sh, out_nodes=lig_node_attr.shape[0], scatter_arange = True)
tr_pred = global_pred[:, :3] + global_pred[:, 6:9]
data.graph_sigma_emb = self.timestep_emb_func(data.complex_t['tr'])
tr_norm = torch.linalg.vector_norm(tr_pred, dim=1).unsqueeze(1)
expand_sigma_emb = torch.index_select(data.graph_sigma_emb, dim=0, index=data['ligand'].batch)
tr_pred = tr_pred / tr_norm * self.tr_final_layer(torch.cat([tr_norm, expand_sigma_emb], dim=1))
expand_tr_sigma = torch.index_select(tr_sigma, dim=0, index=data['ligand'].batch)
if self.scale_by_sigma:
tr_pred = tr_pred / expand_tr_sigma.unsqueeze(1)
return tr_pred, expand_tr_sigma, data['ligand'].batch
def build_lig_node_attr(self, data):
data['ligand'].node_sigma_emb = self.timestep_emb_func(data['ligand'].node_t['tr'])
node_attr = torch.cat([data['ligand'].x, data['ligand'].node_sigma_emb], 1)
return node_attr
def build_rec_conv_graph(self, data):
    """Build receptor node features and intra-receptor edge features.

    Returns (node_attr, edge_index, edge_attr, edge_sh).
    """
    rec = data['receptor']
    rec.node_sigma_emb = self.timestep_emb_func(rec.node_t['tr'])
    node_attr = torch.cat([rec.x, rec.node_sigma_emb], dim=1)

    edge_index = data['receptor', 'receptor'].edge_index
    src, dst = edge_index
    # Relative displacement along each edge (dst minus src).
    edge_vec = rec.pos[dst.long()] - rec.pos[src.long()]

    # Edge features: sigma embedding of the source node + RBF of the length.
    edge_attr = torch.cat([
        rec.node_sigma_emb[src.long()],
        self.rec_distance_expansion(edge_vec.norm(dim=-1)),
    ], dim=1)
    edge_sh = o3.spherical_harmonics(
        self.sh_irreps, edge_vec, normalize=True, normalization='component')
    return node_attr, edge_index, edge_attr, edge_sh
def build_cross_conv_graph(self, data, cross_distance_cutoff):
    """Build ligand/receptor cross edges within `cross_distance_cutoff`."""
    if torch.is_tensor(cross_distance_cutoff):
        # different cutoff for every graph (depends on the diffusion time):
        # rescale positions so a unit-radius query reproduces each cutoff.
        edge_index = radius(
            data['receptor'].pos / cross_distance_cutoff[data['receptor'].batch],
            data['ligand'].pos / cross_distance_cutoff[data['ligand'].batch],
            1,
            data['receptor'].batch, data['ligand'].batch,
            max_num_neighbors=10000)
    else:
        edge_index = radius(
            data['receptor'].pos, data['ligand'].pos, cross_distance_cutoff,
            data['receptor'].batch, data['ligand'].batch,
            max_num_neighbors=10000)

    lig_idx, rec_idx = edge_index  # row 0 indexes the ligand (query) nodes
    edge_vec = data['receptor'].pos[rec_idx.long()] - data['ligand'].pos[lig_idx.long()]
    edge_attr = torch.cat([
        data['ligand'].node_sigma_emb[lig_idx.long()],
        self.cross_distance_expansion(edge_vec.norm(dim=-1)),
    ], dim=1)
    edge_sh = o3.spherical_harmonics(
        self.sh_irreps, edge_vec, normalize=True, normalization='component')
    return edge_index, edge_attr, edge_sh
def build_center_conv_graph(self, data):
    """Build the graph connecting every ligand atom to its graph's mean
    ligand position (one virtual center node per graph).

    Returns (edge_index, edge_attr, edge_sh); edge_index[0] is the graph
    index, edge_index[1] the atom index.
    """
    device = data['ligand'].x.device
    edge_index = torch.cat([
        data['ligand'].batch.unsqueeze(0),
        torch.arange(len(data['ligand'].batch)).to(device).unsqueeze(0),
    ], dim=0)

    # Mean ligand-atom position per graph. (An unused `count` tensor of the
    # same shape was previously allocated here; bincount does the job.)
    center_pos = torch.zeros((data.num_graphs, 3)).to(device)
    center_pos.index_add_(0, index=data['ligand'].batch, source=data['ligand'].pos)
    center_pos = center_pos / torch.bincount(data['ligand'].batch).unsqueeze(1)

    edge_vec = data['ligand'].pos[edge_index[1]] - center_pos[edge_index[0]]
    edge_attr = self.center_distance_expansion(edge_vec.norm(dim=-1))
    edge_sigma_emb = data['ligand'].node_sigma_emb[edge_index[1].long()]
    edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1)
    edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
    return edge_index, edge_attr, edge_sh
def build_bond_conv_graph(self, data):
    """Build the graph used for bond/torsion prediction: edges from each
    rotatable-bond midpoint to every ligand atom within `self.lig_max_radius`."""
    # Keep only the edges flagged as rotatable bonds by edge_mask.
    bonds = data['ligand', 'ligand'].edge_index[:, data['ligand'].edge_mask].long()
    # Midpoint of each bond and the batch assignment of its first endpoint.
    bond_pos = (data['ligand'].pos[bonds[0]] + data['ligand'].pos[bonds[1]]) / 2
    bond_batch = data['ligand'].batch[bonds[0]]
    edge_index = radius(data['ligand'].pos, bond_pos, self.lig_max_radius, batch_x=data['ligand'].batch, batch_y=bond_batch)
    edge_vec = data['ligand'].pos[edge_index[1]] - bond_pos[edge_index[0]]
    edge_attr = self.lig_distance_expansion(edge_vec.norm(dim=-1))
    edge_attr = self.final_edge_embedding(edge_attr)
    edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
    return bonds, edge_index, edge_attr, edge_sh
class GaussianSmearing(torch.nn.Module):
    """Expand scalar distances into Gaussian radial-basis features.

    Centers are evenly spaced on [start, stop]; the Gaussian width is tied
    to the spacing between adjacent centers.
    """

    def __init__(self, start=0.0, stop=5.0, num_gaussians=50):
        super().__init__()
        centers = torch.linspace(start, stop, num_gaussians)
        # Exponent coefficient -1 / (2 * spacing^2), shared by all centers.
        self.coeff = -0.5 / (centers[1] - centers[0]).item() ** 2
        self.register_buffer('offset', centers)

    def forward(self, dist):
        # (N,) -> (N, num_gaussians): difference of each distance to each center.
        delta = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * delta.pow(2))
| Python |
3D | kuangxh9/SuperWater | datasets/esm_embeddings_to_pt.py | .py | 558 | 17 |
import os
from argparse import ArgumentParser
import torch
from tqdm import tqdm

parser = ArgumentParser()
parser.add_argument('--esm_embeddings_path', type=str, default='data/embeddings_output', help='')
parser.add_argument('--output_path', type=str, default='data/esm2_3billion_embeddings.pt', help='')
args = parser.parse_args()

# Collect every per-chain ESM embedding file (layer-33 representations) into
# one dict keyed by the file name's prefix before the first '.', then save it
# as a single .pt file. (Renamed from `dict`, which shadowed the builtin.)
embeddings = {}
for filename in tqdm(os.listdir(args.esm_embeddings_path)):
    key = filename.split('.')[0]
    data = torch.load(os.path.join(args.esm_embeddings_path, filename))
    embeddings[key] = data['representations'][33]
torch.save(embeddings, args.output_path)
3D | kuangxh9/SuperWater | datasets/conformer_matching.py | .py | 7,071 | 197 | import copy, time
import numpy as np
from collections import defaultdict
from rdkit import Chem, RDLogger
from rdkit.Chem import AllChem, rdMolTransforms
from rdkit import Geometry
import networkx as nx
from scipy.optimize import differential_evolution
RDLogger.DisableLog('rdApp.*')
"""
Conformer matching routines from Torsional Diffusion
"""
def GetDihedral(conf, atom_idx):
    """Read the dihedral angle (radians) defined by four atom indices."""
    i, j, k, l = atom_idx
    return rdMolTransforms.GetDihedralRad(conf, i, j, k, l)
def SetDihedral(conf, atom_idx, new_vale):
    """Set the dihedral angle (radians) defined by four atom indices."""
    i, j, k, l = atom_idx
    rdMolTransforms.SetDihedralRad(conf, i, j, k, l, new_vale)
def apply_changes(mol, values, rotable_bonds, conf_id):
    """Return a copy of `mol` with torsion `values[i]` written to the dihedral
    `rotable_bonds[i]` on conformer `conf_id` of the copy."""
    opt_mol = copy.copy(mol)
    conf = opt_mol.GetConformer(conf_id)
    # Plain loop instead of a throwaway list comprehension: the calls are
    # executed purely for their side effects.
    for bond, value in zip(rotable_bonds, values):
        SetDihedral(conf, bond, value)
    return opt_mol
def optimize_rotatable_bonds(mol, true_mol, rotable_bonds, probe_id=-1, ref_id=-1, seed=0, popsize=15, maxiter=500,
                             mutation=(0.5, 1), recombination=0.8):
    """Fit `mol`'s torsions to `true_mol` via differential evolution over the
    rotatable-bond dihedrals; returns the optimised molecule."""
    objective = OptimizeConformer(mol, true_mol, rotable_bonds, seed=seed, probe_id=probe_id, ref_id=ref_id)
    # One [-pi, pi] search box per rotatable bond.
    bounds = [(-np.pi, np.pi)] * len(objective.rotable_bonds)

    # Optimize conformations
    result = differential_evolution(objective.score_conformation, bounds,
                                    maxiter=maxiter, popsize=popsize,
                                    mutation=mutation, recombination=recombination,
                                    disp=False, seed=seed)
    return apply_changes(objective.mol, result['x'], objective.rotable_bonds, conf_id=probe_id)
class OptimizeConformer:
    """Objective wrapper scoring how well `mol`'s torsions match `true_mol`.

    `score_conformation` writes a torsion-value vector into `mol`'s probe
    conformer in place and returns the RMSD to `true_mol`.
    """
    def __init__(self, mol, true_mol, rotable_bonds, probe_id=-1, ref_id=-1, seed=None):
        super(OptimizeConformer, self).__init__()
        # BUG FIX: `if seed:` silently skipped seeding for seed=0, which is
        # exactly the default passed by optimize_rotatable_bonds. Compare
        # against None so seed=0 still seeds the RNG.
        if seed is not None:
            np.random.seed(seed)
        self.rotable_bonds = rotable_bonds
        self.mol = mol
        self.true_mol = true_mol
        self.probe_id = probe_id
        self.ref_id = ref_id

    def score_conformation(self, values):
        # Apply the candidate torsions in place, then score by aligned RMSD.
        for i, r in enumerate(self.rotable_bonds):
            SetDihedral(self.mol.GetConformer(self.probe_id), r, values[i])
        return RMSD(self.mol, self.true_mol, self.probe_id, self.ref_id)
def get_torsion_angles(mol):
    """Enumerate one torsion quadruple (n0, a, b, n1) per rotatable bond.

    A bond counts as rotatable when removing it disconnects the molecular
    graph and the smaller fragment contains at least two atoms.
    """
    torsions_list = []
    G = nx.Graph()
    for i, atom in enumerate(mol.GetAtoms()):
        G.add_node(i)
    # (Removed an unused `nodes = set(G.nodes())` local here.)
    for bond in mol.GetBonds():
        start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        G.add_edge(start, end)
    for e in G.edges():
        G2 = copy.deepcopy(G)
        G2.remove_edge(*e)
        if nx.is_connected(G2): continue
        # Smallest fragment produced by cutting this bond.
        l = list(sorted(nx.connected_components(G2), key=len)[0])
        if len(l) < 2: continue
        n0 = list(G2.neighbors(e[0]))
        n1 = list(G2.neighbors(e[1]))
        torsions_list.append(
            (n0[0], e[0], e[1], n1[0])
        )
    return torsions_list
# GeoMol
def get_torsions(mol_list):
    """Collect torsion quadruples across a list of molecules (GeoMol version).

    Atom indices are offset by a running counter so quadruples from different
    molecules index into one concatenated atom list. For each SMARTS-matched
    rotatable bond exactly one torsion is recorded (the first valid
    substituent pair found); the order is reversed when the outer atom sits
    in a ring.
    """
    print('USING GEOMOL GET TORSIONS FUNCTION')
    atom_counter = 0
    torsionList = []
    for m in mol_list:
        # Two non-terminal atoms, neither on a triple bond, joined by a
        # non-ring single bond.
        torsionSmarts = '[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]'
        torsionQuery = Chem.MolFromSmarts(torsionSmarts)
        matches = m.GetSubstructMatches(torsionQuery)
        for match in matches:
            idx2 = match[0]
            idx3 = match[1]
            bond = m.GetBondBetweenAtoms(idx2, idx3)
            jAtom = m.GetAtomWithIdx(idx2)
            kAtom = m.GetAtomWithIdx(idx3)
            for b1 in jAtom.GetBonds():
                if (b1.GetIdx() == bond.GetIdx()):
                    continue
                idx1 = b1.GetOtherAtomIdx(idx2)
                for b2 in kAtom.GetBonds():
                    if ((b2.GetIdx() == bond.GetIdx())
                            or (b2.GetIdx() == b1.GetIdx())):
                        continue
                    idx4 = b2.GetOtherAtomIdx(idx3)
                    # skip 3-membered rings
                    if (idx4 == idx1):
                        continue
                    if m.GetAtomWithIdx(idx4).IsInRing():
                        torsionList.append(
                            (idx4 + atom_counter, idx3 + atom_counter, idx2 + atom_counter, idx1 + atom_counter))
                        break
                    else:
                        torsionList.append(
                            (idx1 + atom_counter, idx2 + atom_counter, idx3 + atom_counter, idx4 + atom_counter))
                        break
                # Only the first non-central b1 substituent is considered.
                break
        atom_counter += m.GetNumAtoms()
    return torsionList
def A_transpose_matrix(alpha):
    """2x2 matrix [[cos, sin], [-sin, cos]] — the transpose of the rotation
    matrix R(alpha), as a float64 array."""
    c, s = np.cos(alpha), np.sin(alpha)
    return np.array([[c, s], [-s, c]], dtype=np.double)
def S_vec(alpha):
    """Unit column vector (cos(alpha), sin(alpha))^T as a (2, 1) float64 array."""
    c, s = np.cos(alpha), np.sin(alpha)
    return np.array([[c], [s]], dtype=np.double)
def GetDihedralFromPointCloud(Z, atom_idx):
    """Dihedral angle (radians) defined by the four points Z[atom_idx]."""
    p = Z[list(atom_idx)]
    b = p[:-1] - p[1:]
    b[0] *= -1  # orient the first bond vector from p0 toward p1
    # Components of the outer bonds perpendicular to the central bond b[1].
    v = np.array([u - (u.dot(b[1]) / b[1].dot(b[1])) * b[1] for u in (b[0], b[2])])
    # Normalize vectors
    v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1, 1)
    b1 = b[1] / np.linalg.norm(b[1])
    x = np.dot(v[0], v[1])
    m = np.cross(v[0], b1)
    y = np.dot(m, v[1])
    return np.arctan2(y, x)
def get_dihedral_vonMises(mol, conf, atom_idx, Z):
    """Estimate the dihedral around bond (i, j) by a circular (von Mises
    style) mean over all substituent pairs.

    Each pair (k, l) of neighbours of i and j votes with the dihedral it
    observes in the point cloud `Z`, rotated by reference dihedrals taken
    from conformer `conf`; the unit-vector votes are summed and the mean
    angle is returned.
    """
    Z = np.array(Z)
    v = np.zeros((2, 1))
    iAtom = mol.GetAtomWithIdx(atom_idx[1])
    jAtom = mol.GetAtomWithIdx(atom_idx[2])
    k_0 = atom_idx[0]
    i = atom_idx[1]
    j = atom_idx[2]
    l_0 = atom_idx[3]
    for b1 in iAtom.GetBonds():
        k = b1.GetOtherAtomIdx(i)
        if k == j:
            continue
        for b2 in jAtom.GetBonds():
            l = b2.GetOtherAtomIdx(j)
            if l == i:
                continue
            assert k != l
            # Observed dihedral for this substituent pair, as a unit vector.
            s_star = S_vec(GetDihedralFromPointCloud(Z, (k, i, j, l)))
            # Rotate into the frame of the reference torsion (k_0, i, j, l_0).
            a_mat = A_transpose_matrix(GetDihedral(conf, (k, i, j, k_0)) + GetDihedral(conf, (l_0, i, j, l)))
            v = v + np.matmul(a_mat, s_star)
    v = v / np.linalg.norm(v)
    v = v.reshape(-1)
    return np.arctan2(v[1], v[0])
def get_von_mises_rms(mol, mol_rdkit, rotable_bonds, conf_id):
    """RMSD between `mol` and `mol_rdkit` after matching `mol_rdkit`'s
    rotatable-bond torsions to `mol` via the von-Mises estimate."""
    target_positions = mol.GetConformer().GetPositions()
    new_dihedrals = np.array([
        get_dihedral_vonMises(mol_rdkit, mol_rdkit.GetConformer(conf_id), bond, target_positions)
        for bond in rotable_bonds
    ])
    mol_rdkit = apply_changes(mol_rdkit, new_dihedrals, rotable_bonds, conf_id)
    return RMSD(mol_rdkit, mol, conf_id)
def mmff_func(mol):
    """Relax every conformer of `mol` with the MMFF94s force field and copy
    the optimised coordinates back into `mol` in place."""
    relaxed = copy.deepcopy(mol)
    AllChem.MMFFOptimizeMoleculeConfs(relaxed, mmffVariant='MMFF94s')
    for conf_idx in range(mol.GetNumConformers()):
        coords = relaxed.GetConformers()[conf_idx].GetPositions()
        conf = mol.GetConformer(conf_idx)
        for atom_idx in range(coords.shape[0]):
            conf.SetAtomPosition(atom_idx, Geometry.Point3D(*coords[atom_idx]))
# Alias: RMSD(probe, ref, ...) aligns `probe` onto `ref` and returns the RMSD.
RMSD = AllChem.AlignMol
| Python |
3D | kuangxh9/SuperWater | datasets/process_mols.py | .py | 26,058 | 581 | import copy
import os
import warnings
import numpy as np
import scipy.spatial as spa
import torch
from Bio.PDB import PDBParser
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from rdkit import Chem
from rdkit.Chem.rdchem import BondType as BT
from rdkit.Chem import AllChem, GetPeriodicTable, RemoveHs
from rdkit.Geometry import Point3D
from scipy import spatial
from scipy.special import softmax
from torch_cluster import radius_graph
import torch.nn.functional as F
from datasets.conformer_matching import get_torsion_angles, optimize_rotatable_bonds
from utils.torsion import get_transformation_mask
# Strict parser for the protein part of a PDB; the permissive one tolerates
# malformed records so the ligand/heteroatom residues can still be read.
biopython_parser_rec = PDBParser(PERMISSIVE=False)
biopython_parser_rec_lig = PDBParser(PERMISSIVE=True)
periodic_table = GetPeriodicTable()
# One residue name per line; extends the amino-acid vocabulary defined below.
ligand_amino_acids_file_path = os.getcwd() + '/datasets/pdbid_info_res_names.txt'
def read_ligand_amino_acids(file_path):
    """Read one residue name per line from `file_path` (newlines stripped)."""
    with open(file_path, 'r') as file:
        return file.read().splitlines()
possible_ligand_amino_acids = read_ligand_amino_acids(ligand_amino_acids_file_path)

# Vocabularies for every categorical atom/residue feature. `safe_index` maps
# values not listed here onto the final entry, so each list ends with a
# 'misc' catch-all.
allowable_features = {
    'possible_atomic_num_list': list(range(1, 119)) + ['misc'],
    'possible_chirality_list': [
        'CHI_UNSPECIFIED',
        'CHI_TETRAHEDRAL_CW',
        'CHI_TETRAHEDRAL_CCW',
        'CHI_OTHER'
    ],
    'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
    'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],
    'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],
    'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],
    'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
    'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
    'possible_hybridization_list': [
        'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'
    ],
    'possible_is_aromatic_list': [False, True],
    'possible_is_in_ring3_list': [False, True],
    'possible_is_in_ring4_list': [False, True],
    'possible_is_in_ring5_list': [False, True],
    'possible_is_in_ring6_list': [False, True],
    'possible_is_in_ring7_list': [False, True],
    'possible_is_in_ring8_list': [False, True],
    # Standard residues plus protonation/modification variants, extended by
    # the ligand residue names read from disk above.
    'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',
                             'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV', 'MEU',
                             'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'] + possible_ligand_amino_acids,
    'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*', 'OD',
                             'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],
    'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2',
                             'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O', 'OD1',
                             'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],
}

# RDKit bond type -> integer class used for the one-hot bond features.
bonds = {BT.SINGLE: 0, BT.DOUBLE: 1, BT.TRIPLE: 2, BT.AROMATIC: 3}

# (per-feature cardinalities, number of scalar features) pairs used to size
# the categorical embedding layers.
lig_feature_dims = (list(map(len, [
    allowable_features['possible_atomic_num_list'],
    allowable_features['possible_chirality_list'],
    allowable_features['possible_degree_list'],
    allowable_features['possible_formal_charge_list'],
    allowable_features['possible_implicit_valence_list'],
    allowable_features['possible_numH_list'],
    allowable_features['possible_number_radical_e_list'],
    allowable_features['possible_hybridization_list'],
    allowable_features['possible_is_aromatic_list'],
    allowable_features['possible_numring_list'],
    allowable_features['possible_is_in_ring3_list'],
    allowable_features['possible_is_in_ring4_list'],
    allowable_features['possible_is_in_ring5_list'],
    allowable_features['possible_is_in_ring6_list'],
    allowable_features['possible_is_in_ring7_list'],
    allowable_features['possible_is_in_ring8_list'],
])), 0)  # number of scalar features

rec_atom_feature_dims = (list(map(len, [
    allowable_features['possible_amino_acids'],
    allowable_features['possible_atomic_num_list'],
    allowable_features['possible_atom_type_2'],
    allowable_features['possible_atom_type_3'],
])), 0)

rec_residue_feature_dims = (list(map(len, [
    allowable_features['possible_amino_acids']
])), 0)
def lig_atom_featurizer(mol):
    """Per-atom categorical features for a ligand.

    Each row holds indices into the `allowable_features` vocabularies;
    returns an integer tensor of shape (num_atoms, 16).
    """
    ringinfo = mol.GetRingInfo()
    atom_features_list = []
    for idx, atom in enumerate(mol.GetAtoms()):
        atom_features_list.append([
            safe_index(allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
            allowable_features['possible_chirality_list'].index(str(atom.GetChiralTag())),
            safe_index(allowable_features['possible_degree_list'], atom.GetTotalDegree()),
            safe_index(allowable_features['possible_formal_charge_list'], atom.GetFormalCharge()),
            safe_index(allowable_features['possible_implicit_valence_list'], atom.GetImplicitValence()),
            safe_index(allowable_features['possible_numH_list'], atom.GetTotalNumHs()),
            safe_index(allowable_features['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
            safe_index(allowable_features['possible_hybridization_list'], str(atom.GetHybridization())),
            allowable_features['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
            safe_index(allowable_features['possible_numring_list'], ringinfo.NumAtomRings(idx)),
            allowable_features['possible_is_in_ring3_list'].index(ringinfo.IsAtomInRingOfSize(idx, 3)),
            allowable_features['possible_is_in_ring4_list'].index(ringinfo.IsAtomInRingOfSize(idx, 4)),
            allowable_features['possible_is_in_ring5_list'].index(ringinfo.IsAtomInRingOfSize(idx, 5)),
            allowable_features['possible_is_in_ring6_list'].index(ringinfo.IsAtomInRingOfSize(idx, 6)),
            allowable_features['possible_is_in_ring7_list'].index(ringinfo.IsAtomInRingOfSize(idx, 7)),
            allowable_features['possible_is_in_ring8_list'].index(ringinfo.IsAtomInRingOfSize(idx, 8)),
        ])
    return torch.tensor(atom_features_list)
def rec_residue_featurizer(rec):
    """One categorical feature per residue: index of its 3-letter name in the
    amino-acid vocabulary ('misc' for unknown names).

    Returns a float32 tensor of shape (N_res, 1).
    """
    indices = [
        [safe_index(allowable_features['possible_amino_acids'], residue.get_resname())]
        for residue in rec.get_residues()
    ]
    return torch.tensor(indices, dtype=torch.float32)
def safe_index(l, e):
    """Return index of element e in list l. If e is not present, return the last index."""
    try:
        return l.index(e)
    except ValueError:  # was a bare `except:` — only a missing value is expected here
        return len(l) - 1
def parse_receptor(pdbid, pdbbind_dir):
    """Thin wrapper: parse the processed receptor PDB for `pdbid`,
    returning the (strict, permissive) model pair from parsePDB."""
    return parsePDB(pdbid, pdbbind_dir)
def parsePDB(pdbid, pdbbind_dir):
    """Locate `<pdbid>_protein_processed.pdb` under the PDBBind directory
    and parse it."""
    rec_path = os.path.join(pdbbind_dir, pdbid, pdbid + '_protein_processed.pdb')
    return parse_pdb_from_path(rec_path)
def parse_pdb_from_path(path):
    """Parse `path` twice — strictly (protein only) and permissively
    (protein plus malformed ligand records) — and return the first model
    of each parse as (rec, rec_lig)."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=PDBConstructionWarning)
        strict_structure = biopython_parser_rec.get_structure('random_id', path)
        permissive_structure = biopython_parser_rec_lig.get_structure('random_id', path)
    return strict_structure[0], permissive_structure[0]
def extract_receptor_structure(rec, rec_lig, lig, lm_embedding_chains=None):
    """Filter the receptor to chains with valid residues and collect coords.

    Drops waters ('HOH') and residues lacking a complete N/CA/C backbone,
    keeps only chains with at least one valid residue, and aligns the
    optional per-chain language-model embeddings with the surviving chains.
    Mutates `rec` in place (invalid residues/chains are detached).

    Returns (rec, rec_lig, coords, all_coords_test, c_alpha_coords,
    n_coords, c_coords, lm_embeddings).
    """
    conf = lig.GetConformer()
    lig_coords = conf.GetPositions()
    min_distances = []
    coords = []
    all_coords = []
    c_alpha_coords = []
    n_coords = []
    c_coords = []
    valid_chain_ids = []
    lengths = []
    all_coords_test = []
    # Unfiltered per-residue coordinates from the permissive parse (keeps
    # residues the strict pass below would drop).
    for i, chain in enumerate(rec_lig):
        chain_lig_coords_test = []
        for res_idx, residue in enumerate(chain):
            residue_coord_test = []
            for atom in residue:
                residue_coord_test.append(list(atom.get_vector()))
            chain_lig_coords_test.append(np.array(residue_coord_test))
        all_coords_test.append(chain_lig_coords_test)
    for i, chain in enumerate(rec):
        chain_coords = []  # num_residues, num_atoms, 3
        chain_lig_coords = []
        chain_c_alpha_coords = []
        chain_n_coords = []
        chain_c_coords = []
        count = 0
        invalid_res_ids = []
        for res_idx, residue in enumerate(chain):
            if residue.get_resname() == 'HOH':
                invalid_res_ids.append(residue.get_id())
                continue
            residue_coords = []
            c_alpha, n, c = None, None, None
            for atom in residue:
                if atom.name == 'CA':
                    c_alpha = list(atom.get_vector())
                if atom.name == 'N':
                    n = list(atom.get_vector())
                if atom.name == 'C':
                    c = list(atom.get_vector())
                residue_coords.append(list(atom.get_vector()))
            if c_alpha is not None and n is not None and c is not None:
                # only append residue if it is an amino acid and not some weird molecule that is part of the complex
                chain_c_alpha_coords.append(c_alpha)
                chain_n_coords.append(n)
                chain_c_coords.append(c)
                chain_coords.append(np.array(residue_coords))
                chain_lig_coords.append(np.array(residue_coords))
                count += 1
            else:
                chain_lig_coords.append(np.array(residue_coords))
                invalid_res_ids.append(residue.get_id())
        for res_id in invalid_res_ids:
            chain.detach_child(res_id)
        # Minimum ligand-to-chain distance, used only by the fallback below.
        if len(chain_coords) > 0:
            all_chain_coords = np.concatenate(chain_coords, axis=0)
            distances = spatial.distance.cdist(lig_coords, all_chain_coords)
            min_distance = distances.min()
        else:
            min_distance = np.inf
        min_distances.append(min_distance)
        lengths.append(count)
        coords.append(chain_coords)
        all_coords.append(chain_lig_coords)
        c_alpha_coords.append(np.array(chain_c_alpha_coords))
        n_coords.append(np.array(chain_n_coords))
        c_coords.append(np.array(chain_c_coords))
        if not count == 0: valid_chain_ids.append(chain.get_id())
    min_distances = np.array(min_distances)
    if len(valid_chain_ids) == 0:
        # NOTE(review): this appends a numeric chain *index*, but the loop
        # below compares against chain.get_id() (normally a letter) — confirm
        # this fallback can ever actually select a chain.
        valid_chain_ids.append(np.argmin(min_distances))
    valid_coords = []
    valid_c_alpha_coords = []
    valid_n_coords = []
    valid_c_coords = []
    valid_lengths = []
    invalid_chain_ids = []
    valid_lm_embeddings = []
    for i, chain in enumerate(rec):
        if chain.get_id() in valid_chain_ids:
            valid_coords.append(coords[i])
            valid_c_alpha_coords.append(c_alpha_coords[i])
            if lm_embedding_chains is not None:
                if i >= len(lm_embedding_chains):
                    raise ValueError('Encountered valid chain id that was not present in the LM embeddings')
                valid_lm_embeddings.append(lm_embedding_chains[i])
            valid_n_coords.append(n_coords[i])
            valid_c_coords.append(c_coords[i])
            valid_lengths.append(lengths[i])
        else:
            invalid_chain_ids.append(chain.get_id())
    coords = [item for sublist in valid_coords for item in sublist]  # list with n_residues arrays: [n_atoms, 3]
    c_alpha_coords = np.concatenate(valid_c_alpha_coords, axis=0)  # [n_residues, 3]
    n_coords = np.concatenate(valid_n_coords, axis=0)  # [n_residues, 3]
    c_coords = np.concatenate(valid_c_coords, axis=0)  # [n_residues, 3]
    lm_embeddings = np.concatenate(valid_lm_embeddings, axis=0) if lm_embedding_chains is not None else None
    for invalid_id in invalid_chain_ids:
        rec.detach_child(invalid_id)
    all_coords = [item for sublist in all_coords for item in sublist]
    all_coords_test = [item for sublist in all_coords_test for item in sublist]
    assert len(c_alpha_coords) == len(n_coords)
    assert len(c_alpha_coords) == len(c_coords)
    assert sum(valid_lengths) == len(c_alpha_coords)
    return rec, rec_lig, coords, all_coords_test, c_alpha_coords, n_coords, c_coords, lm_embeddings
def get_lig_graph(mol, complex_graph):
    """Populate complex_graph['ligand'] with atom features, positions, and
    one-hot bond-type edges (both directions per bond) from an RDKit mol."""
    lig_coords = torch.from_numpy(mol.GetConformer().GetPositions()).float()
    atom_feats = lig_atom_featurizer(mol)
    row, col, edge_type = [], [], []
    for bond in mol.GetBonds():
        start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        # Each undirected bond becomes two directed edges.
        row += [start, end]
        col += [end, start]
        # Unspecified bonds fall back to class 0 (single).
        edge_type += 2 * [bonds[bond.GetBondType()]] if bond.GetBondType() != BT.UNSPECIFIED else [0, 0]
    edge_index = torch.tensor([row, col], dtype=torch.long)
    edge_type = torch.tensor(edge_type, dtype=torch.long)
    edge_attr = F.one_hot(edge_type, num_classes=len(bonds)).to(torch.float)
    complex_graph['ligand'].x = atom_feats
    complex_graph['ligand'].pos = lig_coords
    complex_graph['ligand', 'lig_bond', 'ligand'].edge_index = edge_index
    complex_graph['ligand', 'lig_bond', 'ligand'].edge_attr = edge_attr
    return
def generate_conformer(mol):
    """Embed a 3D conformer with ETKDGv2 (retrying with random initial
    coordinates if the deterministic embedding fails), then MMFF-relax it."""
    params = AllChem.ETKDGv2()
    if AllChem.EmbedMolecule(mol, params) == -1:
        print('rdkit coords could not be generated without using random coords. using random coords now.')
        params.useRandomCoords = True
        AllChem.EmbedMolecule(mol, params)
    AllChem.MMFFOptimizeMolecule(mol, confId=0)
def get_lig_graph_with_matching(mol_, complex_graph, popsize, maxiter, matching, keep_original, num_conformers, remove_hs):
    """Build the ligand graph, optionally replacing the crystal pose with an
    RDKit-generated conformer whose torsions are matched to the original
    (conformer matching as in Torsional Diffusion).

    With matching, `num_conformers` conformers are generated; the first fills
    the graph and sets complex_graph.rmsd_matching, extras are appended to a
    list in complex_graph['ligand'].pos. Also attaches the rotatable-bond
    edge mask and rotation masks.
    """
    if matching:
        mol_maybe_noh = copy.deepcopy(mol_)
        if remove_hs:
            mol_maybe_noh = RemoveHs(mol_maybe_noh, sanitize=True)
        if keep_original:
            complex_graph['ligand'].orig_pos = mol_maybe_noh.GetConformer().GetPositions()
        rotable_bonds = get_torsion_angles(mol_maybe_noh)
        if not rotable_bonds: print("no_rotable_bonds but still using it")
        for i in range(num_conformers):
            # Fresh RDKit conformer, embedded from scratch each iteration.
            mol_rdkit = copy.deepcopy(mol_)
            mol_rdkit.RemoveAllConformers()
            mol_rdkit = AllChem.AddHs(mol_rdkit)
            generate_conformer(mol_rdkit)
            if remove_hs:
                mol_rdkit = RemoveHs(mol_rdkit, sanitize=True)
            mol = copy.deepcopy(mol_maybe_noh)
            if rotable_bonds:
                # Match the generated conformer's torsions to the original pose.
                optimize_rotatable_bonds(mol_rdkit, mol, rotable_bonds, popsize=popsize, maxiter=maxiter)
            # Align the matched conformer onto the original and record the RMSD.
            mol.AddConformer(mol_rdkit.GetConformer())
            rms_list = []
            AllChem.AlignMolConformers(mol, RMSlist=rms_list)
            mol_rdkit.RemoveAllConformers()
            mol_rdkit.AddConformer(mol.GetConformers()[1])
            if i == 0:
                complex_graph.rmsd_matching = rms_list[0]
                get_lig_graph(mol_rdkit, complex_graph)
            else:
                if torch.is_tensor(complex_graph['ligand'].pos):
                    complex_graph['ligand'].pos = [complex_graph['ligand'].pos]
                complex_graph['ligand'].pos.append(torch.from_numpy(mol_rdkit.GetConformer().GetPositions()).float())
    else:  # no matching
        if keep_original:
            complex_graph['ligand'].orig_pos = torch.from_numpy(mol_.GetConformer().GetPositions())
        complex_graph.rmsd_matching = 0
        if remove_hs: mol_ = RemoveHs(mol_)
        get_lig_graph(mol_, complex_graph)
    edge_mask, mask_rotate = get_transformation_mask(complex_graph)
    complex_graph['ligand'].edge_mask = torch.tensor(edge_mask)
    complex_graph['ligand'].mask_rotate = mask_rotate
    return
def get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords, complex_graph, cutoff=20, max_neighbor=None, lm_embeddings=None):
    """Populate complex_graph['receptor'] with a residue-level (C-alpha) graph.

    Nodes carry the residue-type feature (optionally concatenated with LM
    embeddings); edges connect C-alphas within `cutoff`, capped at
    `max_neighbor` neighbours per node.

    Raises ValueError when the receptor has fewer than two residues.
    """
    n_rel_pos = n_coords - c_alpha_coords
    c_rel_pos = c_coords - c_alpha_coords
    num_residues = len(c_alpha_coords)
    if num_residues <= 1:
        raise ValueError(f"rec contains only 1 residue!")
    # Build the k-NN graph
    distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)
    src_list = []
    dst_list = []
    mean_norm_list = []
    for i in range(num_residues):
        dst = list(np.where(distances[i, :] < cutoff)[0])
        dst.remove(i)
        if max_neighbor != None and len(dst) > max_neighbor:
            dst = list(np.argsort(distances[i, :]))[1: max_neighbor + 1]
        if len(dst) == 0:
            dst = list(np.argsort(distances[i, :]))[1:2]  # choose second because first is i itself
            print(f'The c_alpha_cutoff {cutoff} was too small for one c_alpha such that it had no neighbors. '
                  f'So we connected it to the closest other c_alpha')
        assert i not in dst
        src = [i] * len(dst)
        src_list.extend(src)
        dst_list.extend(dst)
        # (Removed an unused `valid_dist = list(...)` local that shadowed this.)
        valid_dist_np = distances[i, dst]
        # Soft neighbourhood weights at several length scales.
        sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
        weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1)  # (sigma_num, neigh_num)
        # Chained comparison, consistent with get_fullrec_graph.
        assert 1 - 1e-2 < weights[0].sum() < 1.01
        diff_vecs = c_alpha_coords[src, :] - c_alpha_coords[dst, :]  # (neigh_num, 3)
        mean_vec = weights.dot(diff_vecs)  # (sigma_num, 3)
        denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1))  # (sigma_num,)
        mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator  # (sigma_num,)
        mean_norm_list.append(mean_vec_ratio_norm)
    assert len(src_list) == len(dst_list)
    node_feat = rec_residue_featurizer(rec)
    mu_r_norm = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
    side_chain_vecs = torch.from_numpy(
        np.concatenate([np.expand_dims(n_rel_pos, axis=1), np.expand_dims(c_rel_pos, axis=1)], axis=1))
    complex_graph['receptor'].x = torch.cat([node_feat, torch.tensor(lm_embeddings)], axis=1) if lm_embeddings is not None else node_feat
    complex_graph['receptor'].pos = torch.from_numpy(c_alpha_coords).float()
    complex_graph['receptor'].mu_r_norm = mu_r_norm
    complex_graph['receptor'].side_chain_vecs = side_chain_vecs.float()
    complex_graph['receptor', 'rec_contact', 'receptor'].edge_index = torch.from_numpy(np.asarray([src_list, dst_list]))
    return
def rec_atom_featurizer(rec):
    """Per-atom categorical features for the receptor: parent residue name,
    atomic number, and two coarseness levels of the PDB atom name.

    Returns a list of 4-element index lists (one per atom).
    """
    atom_feats = []
    for i, atom in enumerate(rec.get_atoms()):
        atom_name, element = atom.name, atom.element
        if element == 'CD':
            # NOTE(review): element 'CD' is coerced to carbon (presumably a
            # mislabelled delta-carbon rather than cadmium) — confirm.
            element = 'C'
        assert not element == ''
        try:
            atomic_num = periodic_table.GetAtomicNumber(element)
        except Exception:  # was a bare `except:` — unknown element symbols
            atomic_num = -1
        atom_feat = [safe_index(allowable_features['possible_amino_acids'], atom.get_parent().get_resname()),
                     safe_index(allowable_features['possible_atomic_num_list'], atomic_num),
                     safe_index(allowable_features['possible_atom_type_2'], (atom_name + '*')[:2]),
                     safe_index(allowable_features['possible_atom_type_3'], atom_name)]
        atom_feats.append(atom_feat)
    return atom_feats
def get_rec_graph(rec, rec_lig, rec_coords, all_coords, c_alpha_coords, n_coords, c_coords, complex_graph, rec_radius, c_alpha_max_neighbors=None, all_atoms=False,
                  atom_radius=5, atom_max_neighbors=None, remove_hs=False, lm_embeddings=None):
    """Dispatch to the all-atom or C-alpha-only receptor graph builder."""
    if not all_atoms:
        return get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords, complex_graph, rec_radius,
                                c_alpha_max_neighbors, lm_embeddings=lm_embeddings)
    return get_fullrec_graph(rec, rec_lig, rec_coords, all_coords, c_alpha_coords, n_coords, c_coords, complex_graph,
                             c_alpha_cutoff=rec_radius, c_alpha_max_neighbors=c_alpha_max_neighbors,
                             atom_cutoff=atom_radius, atom_max_neighbors=atom_max_neighbors,
                             remove_hs=remove_hs, lm_embeddings=lm_embeddings)
def get_fullrec_graph(rec, rec_lig, rec_coords, all_coords, c_alpha_coords, n_coords, c_coords, complex_graph, c_alpha_cutoff=20,
                      c_alpha_max_neighbors=None, atom_cutoff=5, atom_max_neighbors=None, remove_hs=False, lm_embeddings=None):
    """Populate complex_graph with both a residue-level (C-alpha) graph and
    an all-atom graph for the receptor.

    Residue edges connect C-alphas within `c_alpha_cutoff`; atom edges
    connect atoms within `atom_cutoff`; atom->residue edges link each atom
    to its residue index. Raises ValueError for fewer than two residues.
    """
    n_rel_pos = n_coords - c_alpha_coords
    c_rel_pos = c_coords - c_alpha_coords
    num_residues = len(c_alpha_coords)
    if num_residues <= 1:
        raise ValueError(f"rec contains only 1 residue!")
    # Build the k-NN graph of residues
    distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)
    src_list = []
    dst_list = []
    mean_norm_list = []
    for i in range(num_residues):
        dst = list(np.where(distances[i, :] < c_alpha_cutoff)[0])
        dst.remove(i)
        if c_alpha_max_neighbors != None and len(dst) > c_alpha_max_neighbors:
            dst = list(np.argsort(distances[i, :]))[1: c_alpha_max_neighbors + 1]
        if len(dst) == 0:
            dst = list(np.argsort(distances[i, :]))[1:2]  # choose second because first is i itself
            print(f'The c_alpha_cutoff {c_alpha_cutoff} was too small for one c_alpha such that it had no neighbors. '
                  f'So we connected it to the closest other c_alpha')
        assert i not in dst
        src = [i] * len(dst)
        src_list.extend(src)
        dst_list.extend(dst)
        # (Removed an unused `valid_dist = list(...)` local here.)
        valid_dist_np = distances[i, dst]
        # Soft neighbourhood weights at several length scales.
        sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
        weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1)  # (sigma_num, neigh_num)
        assert 1 - 1e-2 < weights[0].sum() < 1.01
        diff_vecs = c_alpha_coords[src, :] - c_alpha_coords[dst, :]  # (neigh_num, 3)
        mean_vec = weights.dot(diff_vecs)  # (sigma_num, 3)
        denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1))  # (sigma_num,)
        mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator  # (sigma_num,)
        mean_norm_list.append(mean_vec_ratio_norm)
    assert len(src_list) == len(dst_list)
    node_feat = rec_residue_featurizer(rec)
    mu_r_norm = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
    side_chain_vecs = torch.from_numpy(
        np.concatenate([np.expand_dims(n_rel_pos, axis=1), np.expand_dims(c_rel_pos, axis=1)], axis=1))
    complex_graph['receptor'].x = torch.cat([node_feat, torch.tensor(lm_embeddings)], axis=1) if lm_embeddings is not None else node_feat
    complex_graph['receptor'].pos = torch.from_numpy(c_alpha_coords).float()
    complex_graph['receptor'].mu_r_norm = mu_r_norm
    complex_graph['receptor'].side_chain_vecs = side_chain_vecs.float()
    complex_graph['receptor', 'rec_contact', 'receptor'].edge_index = torch.from_numpy(np.asarray([src_list, dst_list]))

    # Atom-level graph: map every atom to the index of its residue.
    src_c_alpha_idx = np.concatenate([np.asarray([i]*len(l)) for i, l in enumerate(all_coords)])
    atom_feat = torch.from_numpy(np.asarray(rec_atom_featurizer(rec_lig)))
    atom_coords = torch.from_numpy(np.concatenate(all_coords, axis=0)).float()
    if remove_hs:
        # Feature column 1 is the atomic-number index; hydrogen maps to 0
        # because possible_atomic_num_list starts at 1.
        not_hs = (atom_feat[:, 1] != 0)
        src_c_alpha_idx = src_c_alpha_idx[not_hs]
        atom_feat = atom_feat[not_hs]
        atom_coords = atom_coords[not_hs]
    atoms_edge_index = radius_graph(atom_coords, atom_cutoff, max_num_neighbors=atom_max_neighbors if atom_max_neighbors else 1000)
    atom_res_edge_index = torch.from_numpy(np.asarray([np.arange(len(atom_feat)), src_c_alpha_idx])).long()
    complex_graph['atom'].x = atom_feat
    complex_graph['atom'].pos = atom_coords
    complex_graph['atom', 'atom_contact', 'atom'].edge_index = atoms_edge_index
    complex_graph['atom', 'atom_rec_contact', 'receptor'].edge_index = atom_res_edge_index
    return
def write_mol_with_coords(mol, new_coords, path):
    """Write `mol` to an SDF at `path` with its conformer coordinates
    replaced by `new_coords` (array of shape (num_atoms, 3))."""
    conf = mol.GetConformer()
    # Hoisted out of the loop: previously the whole array was re-converted
    # to double once per atom.
    coords = new_coords.astype(np.double)
    for i in range(mol.GetNumAtoms()):
        x, y, z = coords[i]
        conf.SetAtomPosition(i, Point3D(x, y, z))
    w = Chem.SDWriter(path)
    try:
        w.write(mol)
    finally:
        w.close()  # release the file handle even if write() raises
def read_molecule(molecule_file, sanitize=False, calc_charges=False, remove_hs=False):
    """Load a small molecule from a .mol2/.sdf/.pdbqt/.pdb file.

    Parsing is done without sanitisation; optionally sanitises, computes
    Gasteiger charges, and strips hydrogens afterwards. Returns the RDKit
    Mol, or None when post-processing fails. Raises ValueError for an
    unsupported file extension.
    """
    if molecule_file.endswith('.mol2'):
        mol = Chem.MolFromMol2File(molecule_file, sanitize=False, removeHs=False)
    elif molecule_file.endswith('.sdf'):
        supplier = Chem.SDMolSupplier(molecule_file, sanitize=False, removeHs=False)
        mol = supplier[0]
    elif molecule_file.endswith('.pdbqt'):
        with open(molecule_file) as file:
            pdbqt_data = file.readlines()
        pdb_block = ''
        for line in pdbqt_data:
            # Keep only the PDB-compatible columns of each PDBQT record.
            pdb_block += '{}\n'.format(line[:66])
        mol = Chem.MolFromPDBBlock(pdb_block, sanitize=False, removeHs=False)
    elif molecule_file.endswith('.pdb'):
        mol = Chem.MolFromPDBFile(molecule_file, sanitize=False, removeHs=False)
    else:
        raise ValueError('Expect the format of the molecule_file to be '
                         'one of .mol2, .sdf, .pdbqt and .pdb, got {}'.format(molecule_file))
    try:
        if sanitize or calc_charges:
            Chem.SanitizeMol(mol)
        if calc_charges:
            # Compute Gasteiger charges on the molecule.
            try:
                AllChem.ComputeGasteigerCharges(mol)
            except Exception:  # was a bare `except:` — charges are best-effort
                warnings.warn('Unable to compute charges for the molecule.')
        if remove_hs:
            mol = Chem.RemoveHs(mol, sanitize=sanitize)
    except Exception as e:
        print(e)
        print("RDKit was unable to read the molecule.")
        return None
    return mol
def read_sdf_or_mol2(sdf_fileName, mol2_fileName):
    """Read a ligand, preferring the SDF file and falling back to the MOL2.

    Returns (mol, problem): `problem` is True when neither file could be
    sanitized (the last parsed molecule is still returned).
    """
    def _try_sanitize(candidate):
        # Returns (mol, had_problem); on failure the molecule is left as parsed.
        try:
            Chem.SanitizeMol(candidate)
            return Chem.RemoveHs(candidate), False
        except Exception:
            return candidate, True

    mol, problem = _try_sanitize(Chem.MolFromMolFile(sdf_fileName, sanitize=False))
    if problem:
        mol, problem = _try_sanitize(Chem.MolFromMol2File(mol2_fileName, sanitize=False))
    return mol, problem
| Python |
3D | kuangxh9/SuperWater | datasets/pdbbind.py | .py | 13,359 | 262 | import binascii
import glob
import hashlib
import os
import pickle
from collections import defaultdict
from multiprocessing import Pool
import random
import copy
import re
import numpy as np
import torch
from rdkit.Chem import MolToSmiles, MolFromSmiles, AddHs
from torch_geometric.data import Dataset, HeteroData
from torch_geometric.loader import DataLoader, DataListLoader
from torch_geometric.transforms import BaseTransform
from tqdm import tqdm
from datasets.process_mols import read_molecule, get_rec_graph, generate_conformer, \
get_lig_graph_with_matching, extract_receptor_structure, parse_receptor, parse_pdb_from_path
from utils.diffusion_utils import modify_conformer, set_time
from utils.utils import read_strings_from_txt
from utils import so3, torus
class NoiseTransform(BaseTransform):
    """Adds translational diffusion noise to a complex graph.

    A diffusion time t is drawn uniformly, the ligand positions are perturbed
    by Gaussian noise of scale t_to_sigma(t), and the corresponding score
    target is stored on the graph as `tr_score`.
    """
    def __init__(self, t_to_sigma, all_atom):
        self.t_to_sigma = t_to_sigma
        self.all_atom = all_atom

    def __call__(self, data):
        # sample a diffusion time uniformly in [0, 1)
        sampled_t = np.random.uniform()
        return self.apply_noise(data, sampled_t)

    def apply_noise(self, data, t_tr, tr_update=None):
        """Perturb `data` at time `t_tr`; if `tr_update` is None a Gaussian
        translation update is drawn. Returns the modified graph."""
        sigma = self.t_to_sigma(t_tr)
        set_time(data, t_tr, 1, self.all_atom, device=None)
        if tr_update is None:
            tr_update = torch.normal(mean=0, std=sigma, size=data['ligand'].pos.shape)
        modify_conformer(data, tr_update)
        # denoising score-matching target for the translation noise
        data.tr_score = -tr_update / sigma ** 2
        return data
class PDBBind(Dataset):
    """Heterogeneous-graph dataset of protein/ligand complexes.

    Complexes listed in `split_path` are preprocessed once into individual
    `<name>.pt` files under a cache directory whose name encodes every
    preprocessing parameter; afterwards `get()` simply loads the cached
    HeteroData objects from disk.
    """
    def __init__(self, root, transform=None, cache_path='data/cache', split_path='data/', limit_complexes=0,
                 receptor_radius=30, num_workers=1, c_alpha_max_neighbors=None, popsize=15, maxiter=15,
                 matching=False, keep_original=False, max_lig_size=None, remove_hs=False, num_conformers=1, all_atoms=False,
                 atom_radius=5, atom_max_neighbors=None, esm_embeddings_path=None, require_ligand=False,
                 ligands_list=None, protein_path_list=None, ligand_descriptions=None, keep_local_structures=False):
        super(PDBBind, self).__init__(root, transform)
        self.pdbbind_dir = root
        self.max_lig_size = max_lig_size
        self.split_path = split_path
        self.limit_complexes = limit_complexes
        self.receptor_radius = receptor_radius
        self.num_workers = num_workers
        self.c_alpha_max_neighbors = c_alpha_max_neighbors
        self.remove_hs = remove_hs
        self.esm_embeddings_path = esm_embeddings_path
        self.require_ligand = require_ligand
        self.protein_path_list = protein_path_list
        self.ligand_descriptions = ligand_descriptions
        self.keep_local_structures = keep_local_structures
        # NOTE(review): `and` binds tighter than `or`, so this reads
        # matching or (protein_path_list is not None and ligand_descriptions is not None)
        # — confirm that is the intended condition
        if matching or protein_path_list is not None and ligand_descriptions is not None:
            cache_path += '_torsion'
        if all_atoms:
            cache_path += '_allatoms'
        # The cache directory name encodes the preprocessing parameters so
        # different settings never collide with each other.
        self.full_cache_path = os.path.join(cache_path, f'limit{self.limit_complexes}'
                                                        f'_INDEX{os.path.splitext(os.path.basename(self.split_path))[0]}'
                                                        f'_maxLigSize{self.max_lig_size}_H{int(not self.remove_hs)}'
                                                        f'_recRad{self.receptor_radius}_recMax{self.c_alpha_max_neighbors}'
                                            + ('' if not all_atoms else f'_atomRad{atom_radius}_atomMax{atom_max_neighbors}')
                                            + ('' if not matching or num_conformers == 1 else f'_confs{num_conformers}')
                                            + ('' if self.esm_embeddings_path is None else f'_esmEmbeddings')
                                            + ('' if not keep_local_structures else f'_keptLocalStruct')
                                            + ('' if protein_path_list is None or ligand_descriptions is None else str(binascii.crc32(''.join(ligand_descriptions + protein_path_list).encode()))))
        self.popsize, self.maxiter = popsize, maxiter
        self.matching, self.keep_original = matching, keep_original
        self.num_conformers = num_conformers
        self.all_atoms = all_atoms
        self.atom_radius, self.atom_max_neighbors = atom_radius, atom_max_neighbors
        # Preprocess once; "done.txt" marks a completed cache that can be reused.
        if not os.path.exists(os.path.join(self.full_cache_path, "done.txt")):
            os.makedirs(self.full_cache_path, exist_ok=True)
            self.preprocessing()
        self.files = [f for f in os.listdir(self.full_cache_path) if f.endswith('.pt')]

    def len(self):
        # Number of cached complex graphs.
        return len(self.files)

    def get(self, idx):
        """Load the idx-th cached complex graph from disk."""
        file_path = os.path.join(self.full_cache_path, self.files[idx])
        data = torch.load(file_path)
        return data

    def preprocessing(self):
        """Build and cache one HeteroData .pt file per complex in the split.

        Uses a multiprocessing Pool when num_workers > 1, otherwise a plain map.
        """
        print(f'Processing complexes from [{self.split_path}] and saving it to [{self.full_cache_path}]')
        complex_names_all = read_strings_from_txt(self.split_path)
        if self.limit_complexes is not None and self.limit_complexes != 0:
            complex_names_all = complex_names_all[:self.limit_complexes]
        print(f'Loading {len(complex_names_all)} complexes.')
        if self.num_workers > 1:
            complex_names = complex_names_all
            # NOTE(review): the nested num_workers checks are redundant inside
            # this branch (always true here); kept unchanged.
            if self.num_workers > 1:
                p = Pool(self.num_workers, maxtasksperchild=1)
                p.__enter__()
            with tqdm(total=len(complex_names), desc=f'loading complexes') as pbar:
                map_fn = p.imap if self.num_workers > 1 else map
                # NOTE(review): if get_complex returned an empty list (parse
                # failure), complex[0] below raises IndexError — confirm inputs
                # are always parseable, or guard here.
                for complex, lig in map_fn(self.get_complex, complex_names):
                    full_path = os.path.join(self.full_cache_path, f"{complex[0].name}.pt")
                    torch.save(complex[0], full_path)
                    pbar.update()
            if self.num_workers > 1: p.__exit__(None, None, None)
        else:
            complex_names = complex_names_all
            with tqdm(total=len(complex_names), desc=f'loading complexes') as pbar:
                for complex, lig in map(self.get_complex, complex_names):
                    full_path = os.path.join(self.full_cache_path, f"{complex[0].name}.pt")
                    torch.save(complex[0], full_path)
                    pbar.update()
        # Sentinel file: marks the cache as complete for future runs.
        with open(os.path.join(self.full_cache_path, 'done.txt'), 'w') as file:
            file.write("done")

    def find_lm_embeddings_chains(self, base_name):
        """Collect per-chain language-model embeddings for one complex,
        sorted numerically by chain index (so chain_10 follows chain_9)."""
        pattern = f"{self.esm_embeddings_path}/{base_name}_chain_*.pt"
        file_list = glob.glob(pattern)
        file_list.sort(key=lambda x: int(re.search(r"_chain_(\d+)\.pt$", x).group(1)))
        # index 33 presumably selects the final ESM layer — TODO confirm against
        # the embedding-extraction script
        lm_embeddings_chains = [torch.load(filename)['representations'][33] for filename in file_list]
        return lm_embeddings_chains

    def get_complex(self, name):
        """Build (complex_graphs, ligs) for one complex folder.

        Any ligand that fails graph construction is dropped from both lists;
        unreadable folders/receptors yield ([], []).
        """
        lm_embedding_chains = self.find_lm_embeddings_chains(name)
        if not os.path.exists(os.path.join(self.pdbbind_dir, name)):
            print("Folder not found", name)
            return [], []
        try:
            rec_model, rec_lig_model = parse_receptor(name, self.pdbbind_dir)
        except Exception as e:
            print(f'Skipping {name} because of the error:')
            print(e)
            return [], []
        ligs = read_mols(self.pdbbind_dir, name, remove_hs=False)
        complex_graphs = []
        failed_indices = []
        for i, lig in enumerate(ligs):
            if self.max_lig_size is not None and lig.GetNumHeavyAtoms() > self.max_lig_size:
                print(f'Ligand with {lig.GetNumHeavyAtoms()} heavy atoms is larger than max_lig_size {self.max_lig_size}. Not including {name} in preprocessed data.')
                continue
            complex_graph = HeteroData()
            complex_graph['name'] = f"{name}"
            try:
                get_lig_graph_with_matching(lig, complex_graph, self.popsize, self.maxiter, self.matching, self.keep_original,
                                            self.num_conformers, remove_hs=self.remove_hs)
                rec, rec_lig, rec_coords, all_coords, c_alpha_coords, n_coords, c_coords, lm_embeddings = extract_receptor_structure(copy.deepcopy(rec_model), copy.deepcopy(rec_lig_model), lig, lm_embedding_chains=lm_embedding_chains)
                # Embeddings must align 1:1 with the receptor residues.
                if lm_embeddings is not None and len(c_alpha_coords) != len(lm_embeddings):
                    print(f'LM embeddings for complex {name} did not have the right length for the protein. Skipping {name}.')
                    failed_indices.append(i)
                    continue
                get_rec_graph(rec, rec_lig, rec_coords, all_coords, c_alpha_coords, n_coords, c_coords, complex_graph, rec_radius=self.receptor_radius,
                              c_alpha_max_neighbors=self.c_alpha_max_neighbors, all_atoms=self.all_atoms,
                              atom_radius=self.atom_radius, atom_max_neighbors=self.atom_max_neighbors, remove_hs=self.remove_hs, lm_embeddings=lm_embeddings)
            except Exception as e:
                print(f'Skipping {name} because of the error:')
                print(e)
                failed_indices.append(i)
                continue
            # Center every coordinate system on the receptor's mean position.
            protein_center = torch.mean(complex_graph['receptor'].pos, dim=0, keepdim=True)
            complex_graph['receptor'].pos -= protein_center
            if self.all_atoms:
                complex_graph['atom'].pos -= protein_center
            if (not self.matching) or self.num_conformers == 1:
                complex_graph['ligand'].pos -= protein_center
            else:
                # multiple conformers: shift each conformer's positions in place
                for p in complex_graph['ligand'].pos:
                    p -= protein_center
            complex_graph.original_center = protein_center
            complex_graphs.append(complex_graph)
        # Remove failed ligands so complex_graphs and ligs stay aligned.
        for idx_to_delete in sorted(failed_indices, reverse=True):
            del ligs[idx_to_delete]
        return complex_graphs, ligs
def print_statistics(complex_graphs):
    """Print mean/std/max of geometric statistics over a list of complex
    graphs: receptor radius, ligand radius, receptor–ligand center distance,
    and (when present) the conformer-matching RMSD."""
    protein_radii, molecule_radii, center_distances, rmsds = [], [], [], []
    for graph in complex_graphs:
        pos = graph['ligand'].pos
        if not torch.is_tensor(pos):
            # multiple conformers are stored as a list; use the first one
            pos = pos[0]
        protein_radii.append(torch.max(torch.linalg.vector_norm(graph['receptor'].pos, dim=1)))
        center = torch.mean(pos, dim=0)
        molecule_radii.append(torch.max(torch.linalg.vector_norm(pos - center.unsqueeze(0), dim=1)))
        center_distances.append(torch.linalg.vector_norm(center))
        rmsds.append(graph.rmsd_matching if "rmsd_matching" in graph else 0)
    name = ['radius protein', 'radius molecule', 'distance protein-mol', 'rmsd matching']
    print('Number of complexes: ', len(complex_graphs))
    for label, values in zip(name, (protein_radii, molecule_radii, center_distances, rmsds)):
        array = np.asarray(values)
        print(f"{label}: mean {np.mean(array)}, std {np.std(array)}, max {np.max(array)}")
def construct_loader(args, t_to_sigma):
    """Create train/val/inference DataLoaders over PDBBind datasets.

    Uses DataListLoader when CUDA is available (for DataParallel-style
    batching), otherwise a regular DataLoader. The inference loader iterates
    the validation set one complex at a time without shuffling.
    """
    transform = NoiseTransform(t_to_sigma=t_to_sigma, all_atom=args.all_atoms)
    # dataset settings shared by the train and validation splits
    common_args = {'transform': transform, 'root': args.data_dir, 'limit_complexes': args.limit_complexes,
                   'receptor_radius': args.receptor_radius,
                   'c_alpha_max_neighbors': args.c_alpha_max_neighbors,
                   'remove_hs': args.remove_hs, 'max_lig_size': args.max_lig_size,
                   'popsize': args.matching_popsize, 'maxiter': args.matching_maxiter,
                   'num_workers': args.num_workers, 'all_atoms': args.all_atoms,
                   'atom_radius': args.atom_radius, 'atom_max_neighbors': args.atom_max_neighbors,
                   'esm_embeddings_path': args.esm_embeddings_path}
    train_dataset = PDBBind(cache_path=args.cache_path, split_path=args.split_train, keep_original=True,
                            num_conformers=args.num_conformers, **common_args)
    val_dataset = PDBBind(cache_path=args.cache_path, split_path=args.split_val, keep_original=True, **common_args)
    loader_class = DataListLoader if torch.cuda.is_available() else DataLoader
    # loader settings shared by all three loaders
    loader_kwargs = dict(num_workers=args.num_dataloader_workers, pin_memory=args.pin_memory)
    train_loader = loader_class(dataset=train_dataset, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
    val_loader = loader_class(dataset=val_dataset, batch_size=args.batch_size, shuffle=True, **loader_kwargs)
    infer_loader = loader_class(dataset=val_dataset, batch_size=1, shuffle=False, **loader_kwargs)
    return train_loader, val_loader, infer_loader
def read_mol(pdbbind_dir, name, remove_hs=False):
    """Read the single `<name>_water.mol2` ligand file of a complex folder."""
    mol2_path = os.path.join(pdbbind_dir, name, f'{name}_water.mol2')
    return read_molecule(mol2_path, remove_hs=remove_hs, sanitize=True)
def read_mols(pdbbind_dir, name, remove_hs=False):
    """Read every .mol2 ligand file in the complex directory `pdbbind_dir/name`.

    Files RDKit cannot parse (read_molecule returns None) are skipped.
    Returns a list of RDKit Mol objects.
    """
    ligs = []
    complex_dir = os.path.join(pdbbind_dir, name)
    for file in os.listdir(complex_dir):
        if file.endswith(".mol2"):
            # was `file[:-5] + ".mol2"` — stripping and re-adding the same
            # extension is a no-op; use the filename directly
            lig = read_molecule(os.path.join(complex_dir, file), remove_hs=remove_hs, sanitize=True)
            if lig is not None:
                ligs.append(lig)
    return ligs
3D | kuangxh9/SuperWater | datasets/esm_embedding_preparation_water.py | .py | 2,850 | 98 | import os
from argparse import FileType, ArgumentParser
import numpy as np
import pandas as pd
from Bio.PDB import PDBParser
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from tqdm import tqdm
from Bio import SeqIO
# CLI: where to write the FASTA output and where the complex folders live.
parser = ArgumentParser()
parser.add_argument('--out_file', type=str, default="data/prepared_for_esm_water.fasta")
parser.add_argument('--dataset', type=str, default="waterbind")
parser.add_argument('--data_dir', type=str, default='data/waterbind/', help='')
args = parser.parse_args()
biopython_parser = PDBParser()
# Map 3-letter residue names to 1-letter amino-acid codes (includes the
# ambiguity codes ASX/GLX/XAA/XLE and the non-standard MSE/PYL/SEC).
three_to_one = {'ALA': 'A',
                'ARG': 'R',
                'ASN': 'N',
                'ASP': 'D',
                'CYS': 'C',
                'GLN': 'Q',
                'GLU': 'E',
                'GLY': 'G',
                'HIS': 'H',
                'ILE': 'I',
                'LEU': 'L',
                'LYS': 'K',
                'MET': 'M',
                'MSE': 'M',  # MSE this is almost the same AA as MET. The sulfur is just replaced by Selen
                'PHE': 'F',
                'PRO': 'P',
                'PYL': 'O',
                'SER': 'S',
                'SEC': 'U',
                'THR': 'T',
                'TRP': 'W',
                'TYR': 'Y',
                'VAL': 'V',
                'ASX': 'B',
                'GLX': 'Z',
                'XAA': 'X',
                'XLE': 'J'}
def get_structure_from_file(file_path):
    """Extract one amino-acid sequence per chain from a PDB file.

    Water residues (HOH) are skipped, and a residue is counted only when its
    full backbone (CA, N and C atoms) is present. Residue names missing from
    `three_to_one` are emitted as '-'. Returns a list of sequence strings,
    one per chain of the first model.
    """
    structure = biopython_parser.get_structure('random_id', file_path)
    structure = structure[0]  # first model only
    l = []
    for i, chain in enumerate(structure):
        seq = ''
        for res_idx, residue in enumerate(chain):
            if residue.get_resname() == 'HOH':
                continue
            c_alpha, n, c = None, None, None
            for atom in residue:
                if atom.name == 'CA':
                    c_alpha = list(atom.get_vector())
                if atom.name == 'N':
                    n = list(atom.get_vector())
                if atom.name == 'C':
                    c = list(atom.get_vector())
            # only append residue if it is an amino acid (full backbone present);
            # `is not None` instead of `!= None` per PEP 8
            if c_alpha is not None and n is not None and c is not None:
                try:
                    seq += three_to_one[residue.get_resname()]
                except KeyError:  # unknown residue name — narrow from bare Exception
                    seq += '-'
                    print("encountered unknown AA: ", residue.get_resname(), ' in the complex ', file_path, '. Replacing it with a dash - .')
        l.append(seq)
    return l
# Walk every complex folder, extract per-chain sequences, and emit one FASTA
# record per chain named "<pdbid>_chain_<i>".
data_dir = args.data_dir
names = os.listdir(data_dir)
sequences = []
ids = []
for name in tqdm(names):
    if name == '.DS_Store': continue  # macOS folder metadata
    # prefer the processed receptor file when it exists
    if os.path.exists(os.path.join(data_dir, name, f'{name}_protein_processed.pdb')):
        rec_path = os.path.join(data_dir, name, f'{name}_protein_processed.pdb')
    else:
        rec_path = os.path.join(data_dir, name, f'{name}_protein.pdb')
    l = get_structure_from_file(rec_path)
    for i, seq in enumerate(l):
        sequences.append(seq)
        # name[:4] presumably truncates to the 4-character PDB id — confirm
        # folder naming convention
        ids.append(f'{name[:4]}_chain_{i}')
records = []
for (index, seq) in zip(ids, sequences):
    record = SeqRecord(Seq(seq), str(index))
    record.description = ''
    records.append(record)
SeqIO.write(records, args.out_file, "fasta")
3D | lvqiujie/Mol2Context-vec | tasks/__init__.py | .py | 0 | 0 | null | Python |
3D | lvqiujie/Mol2Context-vec | tasks/BACE/get_bace_data.py | .py | 4,785 | 161 | import pandas as pd
from sklearn.externals import joblib
import numpy as np
import os
# step 1: read the BACE csv and dump labels/SMILES; one SMILES per line for mol2vec.
filepath="bace/bace.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
w_file = open("bace/bace.smi", mode='w', encoding="utf-8")
all_label = []
all_smi = []
for line in df.values:
    # aa = np.array(line[:17], dtype = np.float64)
    # a =np.isnan(aa)
    all_label.append(line[2])  # column 2 is the class label
    all_smi.append(line[0])    # column 0 is the SMILES string
    w_file.write(line[0]+"\n")
w_file.close()
# step 2: run the external mol2vec tool to build the substructure corpus.
adb = "mol2vec corpus -i bace/bace.smi -o bace/bace.cp -r 1 -j 4 --uncommon UNK --threshold 3"
d = os.popen(adb)
f = d.read()
print(f)
# step 3: convert each corpus line into fixed-length token-id vectors using the
# pretrained vocabulary; molecules that failed ("None"/"UNK") are skipped but
# `index` keeps counting file lines so all_label/all_smi stay aligned.
vocab_path = "data/datasets/my_smi_0/smi_tran.vocab"
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("bace/bace_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
index = -1
mols_path = "bace/bace.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    index += 1
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    if not line:
        break
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        # print(token)
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index
    # NOTE(review): `j` is reused after the loop — an empty token line would
    # raise NameError here; presumably corpus lines are never empty.
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    # print(token_ids)
    label.append(all_label[index])
    smi.append(all_smi[index])
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
joblib.dump(label, 'bace/label.pkl')
joblib.dump(smi, 'bace/smi.pkl')
# step 4
# step 4: run the pretrained context_vec encoder over the tokenized molecules
# and persist the per-token embeddings for the downstream BACE task.
# NOTE(review): TF1-style Session/ConfigProto API — requires tensorflow 1.x,
# and sklearn.externals.joblib was removed in modern scikit-learn.
import os
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Model/generator hyperparameters; must match the checkpoint in model_dir.
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,
    'test_dataset': 'bace/bace_tran.cp_UNK',
    'vocab': 'my_smi_0/smi_tran.vocab',
    'model_dir': "smi_context_vec_best",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    # 'vocab_size': 121,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 512,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': False,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 300,
    'hidden_units_size': 150,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join(DATA_SET_DIR, parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# context_vec_model.load(sampled_softmax=False)
#
# # Evaluate Bidirectional Language Model
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
#
# # Build context_vec meta-model to deploy for production and persist in disk
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# save x (the embedding array)
joblib.dump(context_vec_embeddings, 'bace/bace_embed.pkl')
3D | lvqiujie/Mol2Context-vec | tasks/BACE/bace_train.py | .py | 12,821 | 282 | # from rdkit import Chem
# from rdkit.Chem import AllChem
import random
from tasks.utils.model import *
from sklearn.externals import joblib
import numpy as np
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
import seaborn as sns
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_len(smi):
    """Return the number of atoms of the canonicalized form of `smi`.

    The module-level `from rdkit import Chem` is commented out at the top of
    this file, so without a local import this function raised NameError when
    attention heatmaps were rendered; import rdkit locally instead.
    """
    from rdkit import Chem
    mol = Chem.MolFromSmiles(smi)
    smiles = Chem.MolToSmiles(mol)  # canonicalize the SMILES first
    mol = Chem.MolFromSmiles(smiles)
    mol_atoms = [a.GetIdx() for a in mol.GetAtoms()]
    return len(mol_atoms)
if __name__ == '__main__':
    # Set the hyperparameters.
    input_size = 512
    hidden_size = 512  # number of recurrent units of the rnn (original comment said 32; the value is 512)
    learning_rate = 0.01  # learning-rate hyperparameter
    epoch_num = 2000
    batch_size = 32
    best_loss = 10000
    test_best_loss = 10000
    weight_decay = 1e-5
    momentum = 0.9
    # b = 0.2
    # Load the SMILES, labels and precomputed context_vec embeddings produced
    # by get_bace_data.py.
    all_smi = np.array(joblib.load('bace/smi.pkl'))
    y = joblib.load('bace/label.pkl')
    x = joblib.load('bace/bace_embed.pkl')
    print("data len is ",x.shape[0])
    # Seed every RNG for reproducibility.
    seed = 188
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # 5-Fold
    train_split_x, train_split_y, train_split_smi, \
    val_split_x, val_split_y, val_split_smi, \
    test_split_x, test_split_y, test_split_smi, weights = split_data(x, y, all_smi, 3, "bace")
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM(1, task_type="sing", input_size=300, att=True).to(device)
    # Set up the optimizer and the loss function.
    # NOTE(review): the original comment said Adam is used; the code actually
    # uses SGD with momentum and weight decay.
    optimizer = torch.optim.SGD(rnn.parameters(),
                                lr=learning_rate, weight_decay=weight_decay, momentum = momentum)
    # optimizer = torch.optim.Adam(list(rnn.parameters())+[matrix1, matrix2, matrix3],
    #                              lr=learning_rate, weight_decay = weight_decay)
    # optimizer = torch.optim.RMSprop(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay)
    # loss_function = F.cross_entropy
    # loss_function = F.nll_loss
    # loss_function = nn.CrossEntropyLoss()
    # loss_function = nn.BCELoss()
    loss_function = nn.BCEWithLogitsLoss().to(device)
    # loss_function = FocalLoss(alpha=1 / train_weights[0])
    # loss_function = torch.nn.CrossEntropyLoss(torch.Tensor(train_weights).to(device), reduction='mean')
    # Train the parameters with the following loop.
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        y_true_task = []
        y_pred_task = []
        y_pred_task_score = []
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi = tmp
            # aa = get_delete(tmp_smi[0])
            optimizer.zero_grad()
            outputs = rnn(tmp_compound)
            loss = 0
            # tmp_y = F.one_hot(tmp_y, 2).float().to(device)
            # print(label_one_hot)
            # aa = tmp_y.type(torch.FloatTensor).to(device)
            # bb = outputs
            # print(outputs.flatten())
            # out_label = F.softmax(outputs, dim=1)
            # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
            # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i,x in enumerate(out_label.cpu().detach().numpy())]
            # y_pred.extend(pred)
            # y_pred_score.extend(pred_score)
            y_pred = outputs.to(device).view(-1)
            y_label = tmp_y.float().to(device).view(-1)
            # y_label = F.one_hot(y_label, 2).float().to(device)
            loss += loss_function(y_pred, y_label)
            # pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
            # logits -> probabilities -> hard labels at the 0.5 threshold
            y_pred = torch.sigmoid(y_pred.detach().cpu()).view(-1).numpy()
            pred_lable = np.zeros_like(y_pred, dtype=int)
            pred_lable[np.where(np.asarray(y_pred) > 0.5)] = 1
            y_true_task.extend(y_label.cpu().numpy())
            y_pred_task.extend(pred_lable)
            y_pred_task_score.extend(y_pred)
            # loss = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            # NOTE(review): sum_loss accumulates the loss tensor (not .item()),
            # keeping each batch's graph alive until the epoch ends.
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
        avg_loss = sum_loss / (index + 1)
        # cm = [metrics.confusion_matrix(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        # Epoch-level training metrics: ROC-AUC and PR-AUC.
        trn_roc = metrics.roc_auc_score(y_true_task, y_pred_task_score)
        trn_prc = metrics.auc(precision_recall_curve(y_true_task, y_pred_task_score)[1],
                              precision_recall_curve(y_true_task, y_pred_task_score)[0])
        # acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        # recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        # specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(len(tasks))]
        print("epoch:", epoch," train " "avg_loss:", avg_loss.item(),
              # "acc: ", np.array(acc).mean(),
              # "recall: ", np.array(recall).mean(),
              # "specificity: ", np.array(specificity).mean(),
              " train_auc: ", np.array(trn_roc).mean(),
              " train_pr: ", np.array(trn_prc).mean())
        # Validation pass (no gradients); saves the model on a new best loss.
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            y_true_task = []
            y_pred_task = []
            y_pred_task_score = []
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi = tmp
                loss = 0
                outputs = rnn(tmp_compound)
                # out_label = F.softmax(outputs, dim=1)
                # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
                # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
                # y_pred.extend(pred)
                # y_pred_score.extend(pred_score)
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                # pred_score = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
                pred_score = torch.sigmoid(y_pred.detach().cpu()).view(-1).numpy()
                pred_lable = np.zeros_like(pred_score, dtype=int)
                pred_lable[np.where(np.asarray(pred_score) > 0.5)] = 1
                y_true_task.extend(y_label.cpu().numpy())
                y_pred_task.extend(pred_lable)
                y_pred_task_score.extend(pred_score)
                test_sum_loss += loss.item()
            test_avg_loss = test_sum_loss / (index + 1)
            # NOTE(review): cm/acc/recall/specificity are computed but only
            # printed in the (commented) lines below.
            cm = metrics.confusion_matrix(y_true_task, y_pred_task)
            trn_roc = metrics.roc_auc_score(y_true_task, y_pred_task_score)
            trn_prc = metrics.auc(precision_recall_curve(y_true_task, y_pred_task_score)[1],
                                  precision_recall_curve(y_true_task, y_pred_task_score)[0])
            acc = metrics.accuracy_score(y_true_task, y_pred_task)
            recall = metrics.recall_score(y_true_task, y_pred_task)
            specificity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
            print("epoch:", epoch, "  val avg_loss:", test_avg_loss,
                  # "acc: ", np.array(acc).mean(),
                  # "recall: ", np.array(recall).mean(),
                  # "specificity: ", np.array(specificity).mean(),
                  " test_auc: ", np.array(trn_roc).mean(),
                  " test_pr: ", np.array(trn_prc).mean())
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                PATH = 'bace/lstm_net.pth'
                print("test save model")
                torch.save(rnn.state_dict(), PATH)
        att_flag = False
        # if test_avg_loss < 0.6:
        #     att_flag = True
        #     print(matrix1, matrix2, matrix3)
        # Test pass; optionally renders attention heatmaps when att_flag is on.
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            y_true_task = []
            y_pred_task = []
            y_pred_task_score = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi = tmp
                loss = 0
                outputs = rnn(tmp_compound)
                # out_label = F.softmax(outputs, dim=1)
                # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
                # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
                # y_pred.extend(pred)
                # y_pred_score.extend(pred_score)
                if att_flag:
                    # NOTE(review): `alpha_n` is not defined anywhere in this
                    # scope — this branch would raise NameError if att_flag were
                    # ever enabled; presumably the model was meant to return it.
                    att = alpha_n.cpu().detach().numpy()
                    for att_i in range(alpha_n.shape[0]):
                        smi_len = get_len(tmp_smi[att_i])
                        if smi_len > 40:
                            continue
                        att_tmp = att[att_i,:smi_len*2,:smi_len*2]
                        att_heatmap = att_tmp[1::2, 1::2]
                        # min-max normalize the attention map before plotting
                        att_heatmap = (att_heatmap - att_heatmap.min()) / (att_heatmap.max() - att_heatmap.min())
                        # f, (ax1, ax2) = plt.subplots(figsize=(6, 6), nrows=1)
                        fig = sns.heatmap(att_heatmap, cmap='OrRd')
                        # plt.show()
                        scatter_fig = fig.get_figure()
                        try:
                            scatter_fig.savefig("bace/att_img/"+str(tmp_smi[att_i])+".png", dpi=400)
                        except:
                            continue
                        finally:
                            plt.close()
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                # pred_score = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
                pred_score = torch.sigmoid(y_pred.detach().cpu()).view(-1).numpy()
                pred_lable = np.zeros_like(pred_score, dtype=int)
                pred_lable[np.where(np.asarray(pred_score) > 0.5)] = 1
                y_true_task.extend(y_label.cpu().numpy())
                y_pred_task.extend(pred_lable)
                y_pred_task_score.extend(pred_score)
                test_sum_loss += loss.item()
            test_avg_loss = test_sum_loss / (index + 1)
            cm = metrics.confusion_matrix(y_true_task, y_pred_task)
            trn_roc = metrics.roc_auc_score(y_true_task, y_pred_task_score)
            trn_prc = metrics.auc(precision_recall_curve(y_true_task, y_pred_task_score)[1],
                                  precision_recall_curve(y_true_task, y_pred_task_score)[0])
            acc = metrics.accuracy_score(y_true_task, y_pred_task)
            recall = metrics.recall_score(y_true_task, y_pred_task)
            specificity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
            print("epoch:", epoch, "  test avg_loss:", test_avg_loss,
                  "acc: ", np.array(acc).mean(),
                  "recall: ", np.array(recall).mean(),
                  "specificity: ", np.array(specificity).mean(),
                  " test_auc: ", np.array(trn_roc).mean(),
                  " test_pr: ", np.array(trn_prc).mean())
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/hiv/hiv_train.py | .py | 16,832 | 408 | import sys
sys.path.append('./')
import os
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import math
import random
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
# from utils.util import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def split_data(x, y, all_smi, k_fold, name):
    """Class-stratified 5-fold split of a binary dataset.

    Negatives and positives are split into 5 folds separately so every fold
    keeps the class ratio; within each fold the held-out fifth is divided
    evenly into validation and test halves. All fold indices and the class
    weights are persisted under `name/` for reproducibility.

    Args:
        x, y, all_smi: features, 0/1 labels and SMILES, index-aligned.
        k_fold: which of the 5 folds (0-4) to return.
        name: output directory prefix for the pickled indices/weights.

    Returns:
        (train_x, train_y, train_smi, val_x, val_y, val_smi,
         test_x, test_y, test_smi, weights) where weights is the
        inverse-frequency pair [w_negative, w_positive].
    """
    y = np.array(y)
    all_smi = np.array(all_smi)
    # keyword arguments: positional KFold(5, True, 100) was removed in
    # scikit-learn >= 0.24
    kf = KFold(n_splits=5, shuffle=True, random_state=100)
    train_index = [[], [], [], [], []]
    val_index = [[], [], [], [], []]
    test_index = [[], [], [], [], []]
    negative_index = np.where(y == 0)[0]
    positive_index = np.where(y == 1)[0]
    for k, tmp in enumerate(kf.split(negative_index)):
        # train_tmp/test_tmp index into negative_index
        train_tmp, test_tmp = tmp
        train_index[k].extend(negative_index[train_tmp])
        num_t = int(len(test_tmp) / 2)
        val_index[k].extend(negative_index[test_tmp[:num_t]])
        test_index[k].extend(negative_index[test_tmp[num_t:]])
    for k, tmp in enumerate(kf.split(positive_index)):
        train_tmp, test_tmp = tmp
        train_index[k].extend(positive_index[train_tmp])
        num_t = int(len(test_tmp) / 2)
        val_index[k].extend(positive_index[test_tmp[:num_t]])
        test_index[k].extend(positive_index[test_tmp[num_t:]])
    # inverse-frequency class weights: [w_negative, w_positive]
    weights = [(len(negative_index) + len(positive_index)) / len(negative_index),
               (len(negative_index) + len(positive_index)) / len(positive_index)]
    # persist every fold's indices so later runs can reuse the exact split
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, name + '/' + str(i + 1) + '-fold-index.pkl')
    joblib.dump(weights, name + '/weights.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi,\
           val_split_x, val_split_y, val_split_smi,\
           test_split_x, test_split_y, test_split_smi, weights
# binary class
class CELoss(nn.Module):
    """Weighted binary cross-entropy over 2-class logits.

    The original forward() referenced self.alpha/self.gamma, which were never
    set in __init__ (copy-paste from FocalLoss), so calling it always raised
    AttributeError. Reimplemented using the declared `weight` parameter, which
    scales the positive-class term (semantics reconstructed — confirm against
    the intended weighting scheme).
    """
    def __init__(self, weight=2):
        super(CELoss, self).__init__()
        self.weight = weight  # multiplier for the positive-class loss term

    def forward(self, input, target):
        # input: [M, 2] logits; target: [M] 0/1 labels
        target = target.float()
        pt = torch.softmax(input, dim=1)
        p = pt[:, 1]  # predicted probability of the positive class
        loss = -self.weight * (target * torch.log(p)) - \
               ((1 - target) * torch.log(1 - p))
        return loss.mean()
class FocalLoss(nn.Module):
    """Binary focal loss over 2-class logits.

    Down-weights easy examples by the modulating factor (1-p)^gamma and
    balances classes with alpha (Lin et al., "Focal Loss for Dense Object
    Detection").
    """
    def __init__(self, gamma=2, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha

    def forward(self, input, target):
        # input: [M, 2] logits; target: [M] 0/1 labels
        target = target.float()
        probs = torch.softmax(input, dim=1)
        p_pos = probs[:, 1]  # predicted probability of the positive class
        pos_term = self.alpha * (1 - p_pos) ** self.gamma * (target * torch.log(p_pos))
        neg_term = (1 - self.alpha) * p_pos ** self.gamma * ((1 - target) * torch.log(1 - p_pos))
        return -(pos_term + neg_term).mean()
class LSTM(nn.Module):
    """LSTM classifier over per-token context embeddings.

    forward() expects x of shape (batch, 3, seq_len, 300): three embedding
    layers that are mixed by the learned weights `self.matrix` before the
    LSTM. Output is (batch, 1) raw logits — pair with BCEWithLogitsLoss.
    """
    def __init__(self):
        super(LSTM, self).__init__()
        # learnable mixing weights for the three embedding layers
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.fc = nn.Linear(300, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,)
        # classification head: 1024 -> 512 -> 1 logit
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 1)
        self.dropout = nn.Dropout(p=0.3)

    def attention_net(self, x, query, mask=None):
        """Scaled dot-product self-attention (currently unused by forward).

        x, query: [batch, seq_len, hidden]; returns (context [batch, hidden]
        summed over the sequence, attention weights [batch, seq_len, seq_len]).
        """
        d_k = query.size(-1)  # key/query dimensionality for the 1/sqrt(d_k) scaling
        # scores: [batch, seq_len, seq_len]
        scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(d_k)
        # normalize the scores over the last dimension
        alpha_n = F.softmax(scores, dim=-1)
        # weighted sum of x, then sum over the sequence dimension
        context = torch.matmul(alpha_n, x).sum(1)
        return context, alpha_n

    def forward(self, x):
        # mix the three embedding layers with the learned weights
        x = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        x = self.fc(x.to(device)).to(device)
        out, (h_n, c_n) = self.lstm(x.to(device))
        # mean-pool over the sequence; fix: `.to(device)` instead of `.cuda()`
        # so the model also runs on CPU-only machines (device is the
        # module-level cuda-if-available selector)
        out = torch.mean(out, dim=1).squeeze().to(device)
        out = self.fc3(out)
        out = F.relu(out)
        out = self.dropout(out)
        out = self.fc4(out)
        return out
class MyDataset(data.Dataset):
    """Dataset of (embedding, label, smiles) triples, returned untouched.

    Tensor conversion is deliberately left to the caller / DataLoader.
    """
    def __init__(self, compound, y, smi):
        super(MyDataset, self).__init__()
        self.compound = compound
        self.smi = smi
        self.y = y
    def __getitem__(self, item):
        triple = (self.compound[item], self.y[item], self.smi[item])
        return triple
    def __len__(self):
        return len(self.compound)
if __name__ == '__main__':
    # Hyper-parameters.
    input_size = 512
    hidden_size = 512  # RNN hidden units (comment in source said 32; value is 512)
    learning_rate = 0.01  # learning rate
    epoch_num = 2000
    batch_size = 512
    best_loss = 10000
    test_best_loss = 10000
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.3
    seed = 188
    # Seed every RNG for reproducibility.
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Pre-computed labels / context-vec embeddings / SMILES (see get_hiv_data).
    y = joblib.load('hiv/label.pkl')
    x = joblib.load('hiv/hiv_embed.pkl')
    all_smi = joblib.load('hiv/smi.pkl')
    print("data len is ",x.shape[0])
    # 5-Fold split (fold index 3 of "hiv"); weights = inverse class frequencies.
    train_split_x, train_split_y, train_split_smi, \
    val_split_x, val_split_y, val_split_smi, \
    test_split_x, test_split_y, test_split_smi, weights = split_data(x, y, all_smi, 3, "hiv")
    # print(weights)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True,drop_last=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM().to(device)
    # Set up the optimizer and loss function.
    # Adam over rnn.parameters() with the learning_rate above.
    # optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay,
    #                             momentum = momentum)
    optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay)
    # loss_function = F.cross_entropy
    # loss_function = F.nll_loss
    # loss_function = nn.CrossEntropyLoss()
    # BCE on logits, positive class up-weighted by its inverse frequency.
    loss_function = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(weights[1]).to(device)).to(device)
    # loss_function = [FocalLoss(alpha=1 / w[0]) for w in train_weights]
    # loss_function = [torch.nn.CrossEntropyLoss(torch.Tensor(w).to(device), reduction='mean')
    #                  for w in train_weights]
    # Training loop.
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        y_true = []
        y_pred = []
        y_pred_score = []
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi = tmp
            optimizer.zero_grad()
            outputs = rnn(tmp_compound.to(device))
            outputs = outputs.to(device).view(-1)
            y_label = tmp_y.float().to(device).view(-1)
            # print(outputs.shape, tmp_y.shape)
            loss = loss_function(outputs.to(device), y_label.float().to(device))
            # Threshold the sigmoid score at 0.5 for hard predictions.
            outputs_score = torch.sigmoid(outputs).view(-1)
            pred = np.zeros_like(outputs.cpu().detach().numpy(), dtype=int)
            pred[np.where(np.asarray(outputs_score.cpu().detach().numpy()) > 0.5)] = 1
            y_pred.extend(pred)
            y_true.extend(y_label.cpu().numpy())
            y_pred_score.extend(outputs_score.cpu().detach().numpy())
            # flood = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
            avg_loss = sum_loss / (index + 1)
        cm = metrics.confusion_matrix(y_true, y_pred)
        print("epoch:", epoch," train " "avg_loss:", avg_loss.item(),
              # "acc: ", metrics.accuracy_score(y_true, y_pred),
              # "recall: ", metrics.recall_score(y_true, y_pred),
              # "specificity: ", cm[0, 0] / (cm[0, 0] + cm[0, 1]),
              " train_auc: ", metrics.roc_auc_score(y_true, y_pred_score))
        # (original note) test-set part to be debugged after switching machines
        # Validation pass.
        with torch.no_grad():
            rnn.eval()
            val_avg_loss = 0
            val_sum_loss = 0
            y_true = []
            y_pred = []
            y_pred_score = []
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_sm = tmp
                y_true.extend(tmp_y.cpu().numpy())
                outputs = rnn(tmp_compound)
                # out_label = F.softmax(outputs, dim=1)
                # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
                # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
                # y_pred.extend(pred)
                # y_pred_score.extend(pred_score)
                outputs = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                loss = loss_function(outputs, y_label)
                pred_score = torch.sigmoid(outputs)
                pred = np.zeros_like(outputs.cpu().detach().numpy(), dtype=int)
                pred[np.where(np.asarray(pred_score.cpu().detach().numpy()) > 0.5)] = 1
                y_pred.extend(pred)
                y_pred_score.extend(pred_score.cpu().detach().numpy())
                val_sum_loss += loss.item()
                val_avg_loss = val_sum_loss / (index + 1)
            cm = metrics.confusion_matrix(y_true, y_pred)
            print("epoch:", epoch," val ", "avg_loss: ", val_avg_loss,
                  "acc: ", metrics.accuracy_score(y_true, y_pred),
                  # "recall: ", metrics.recall_score(y_true, y_pred),
                  # "specificity: ", cm[0,0]/(cm[0,0]+cm[0,1]),
                  # "sensitivity: ", cm[1,1]/(cm[1,0]+cm[1,1]),
                  " val_auc: ", metrics.roc_auc_score(y_true, y_pred_score))
            # Save the model whenever validation loss improves.
            if val_avg_loss < test_best_loss:
                test_best_loss = val_avg_loss
                PATH = 'hiv/lstm_net.pth'
                print("test save model")
                torch.save(rnn.state_dict(), PATH)
        # Test pass (runs every epoch; metrics only, no model selection).
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            y_true_task = []
            y_pred_task = []
            y_pred_task_score = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi = tmp
                loss = 0
                outputs = rnn(tmp_compound)
                # out_label = F.softmax(outputs, dim=1)
                # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
                # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
                # y_pred.extend(pred)
                # y_pred_score.extend(pred_score)
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                # pred_score = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
                pred_score = torch.sigmoid(y_pred.detach().cpu()).view(-1).numpy()
                pred_lable = np.zeros_like(pred_score, dtype=int)
                pred_lable[np.where(np.asarray(pred_score) > 0.5)] = 1
                y_true_task.extend(y_label.cpu().numpy())
                y_pred_task.extend(pred_lable)
                y_pred_task_score.extend(pred_score)
                test_sum_loss += loss.item()
                test_avg_loss = test_sum_loss / (index + 1)
            cm = metrics.confusion_matrix(y_true_task, y_pred_task)
            trn_roc = metrics.roc_auc_score(y_true_task, y_pred_task_score)
            # AUC of the precision-recall curve (recall on x, precision on y).
            trn_prc = metrics.auc(precision_recall_curve(y_true_task, y_pred_task_score)[1],
                                  precision_recall_curve(y_true_task, y_pred_task_score)[0])
            acc = metrics.accuracy_score(y_true_task, y_pred_task)
            recall = metrics.recall_score(y_true_task, y_pred_task)
            specificity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
            print("epoch:", epoch, " test avg_loss:", test_avg_loss,
                  "acc: ", np.array(acc).mean(),
                  # "recall: ", np.array(recall).mean(),
                  # "specificity: ", np.array(specificity).mean(),
                  " test_auc: ", np.array(trn_roc).mean(),
                  " test_pr: ", np.array(trn_prc).mean())
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/hiv/get_hiv_data.py | .py | 4,809 | 163 | import sys
sys.path.append('./')
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import os
# Pipeline: HIV csv -> SMILES corpus -> mol2vec tokens -> token ids
# -> context-vec embeddings, persisted as pickles for the training script.
# step 1: extract SMILES (col 0) and labels (col 2) from the csv.
filepath="hiv/hiv.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
w_file = open("hiv/hiv.smi", mode='w', encoding="utf-8")
all_label = []
all_smi = []
for line in df.values:
    smi = line[0]
    if len(smi) <= 0:
        continue
    # aa = np.array(line[:17], dtype = np.float64)
    # a =np.isnan(aa)
    all_label.append(line[2])
    all_smi.append(smi)
    w_file.write(smi+"\n")
w_file.close()
# step 2: run the external mol2vec tool to tokenize the corpus.
adb = "mol2vec corpus -i hiv/hiv.smi -o hiv/hiv.cp -r 1 -j 4 --uncommon UNK --threshold 3"
d = os.popen(adb)
f = d.read()
print(f)
# step 3: map mol2vec tokens to vocabulary ids, padded to sentence_maxlen.
vocab_path = "data/datasets/my_smi/smi_tran.vocab"
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("hiv/hiv_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
index = -1
mols_path = "hiv/hiv.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    # `index` keeps this loop aligned with all_label/all_smi from step 1;
    # it advances on skipped (None/UNK) lines too, on purpose.
    index += 1
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    if not line:
        break
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        # print(token)
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index
    # NOTE(review): `j` leaks from the for-loop; an empty token line would
    # leave it unbound (NameError) — confirm corpus lines are never empty.
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    # print(token_ids)
    label.append(all_label[index])
    smi.append(all_smi[index])
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
joblib.dump(label, 'hiv/label.pkl')
joblib.dump(smi, 'hiv/smi.pkl')
# step 4: run the pretrained context-vec encoder over the token ids.
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
# TF1-style session with on-demand GPU memory growth.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,
    'test_dataset': 'hiv/hiv_tran.cp_UNK',
    'vocab': 'my_smi/smi_tran.vocab',
    'model_dir': "smi_context_vec_best",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 1,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': True,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 400,
    'hidden_units_size': 200,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join("data/datasets", parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# context_vec_model.load(sampled_softmax=False)
#
# # Evaluate Bidirectional Language Model
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
#
# # Build context_vec meta-model to deploy for production and persist in disk
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# save the embeddings (x) for the downstream training script
joblib.dump(context_vec_embeddings, 'hiv/hiv_embed.pkl')
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/utils/model.py | .py | 4,092 | 111 | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import math
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LSTM(nn.Module):
    """Configurable 2-layer LSTM head over blended context-vec embeddings.

    Args:
        out_num: number of outputs; doubled when task_type == 'muti'
            (two logits per task for multi-label classification).
        input_size: embedding width per channel (default 300).
        task_type: 'sing' or 'muti' (note the spelling used by callers).
        att: if True, pool the sequence with self-attention; otherwise
            mean-pool (the mean-pool branch calls .cuda() and therefore
            requires a GPU — NOTE(review)).
    Relies on the module-level `device` global.
    """
    def __init__(self, out_num, input_size=300, task_type='sing', att=False):
        super(LSTM, self).__init__()
        # Learnable mixing weights for the three embedding channels.
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.input_size = input_size
        self.out_num = out_num * 2 if "muti".__eq__(task_type) else out_num
        self.att = att
        self.fc = nn.Linear(self.input_size, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,)
            # bidirectional=True)
        # self.fc1 = nn.Linear(512, 1024)
        # self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, self.out_num)
        self.dropout = nn.Dropout(p=0.3)
        # self.sig = nn.Sigmoid()
        # self.bn1 = nn.BatchNorm1d(1024)
        # self.bn2 = nn.BatchNorm1d(512)
        # self.bn3 = nn.BatchNorm1d(128)
    def attention_net(self, x, query, mask=None):
        """Scaled dot-product self-attention; returns (context, weights)."""
        d_k = query.size(-1)  # d_k: dimensionality of the query
        # query: [batch, seq_len, hidden], x.transpose: [batch, hidden, seq_len]
        # print("query: ", query.shape, x.transpose(1, 2).shape) # torch.Size([128, 38, 128]) torch.Size([128, 128, 38])
        # scoring: scores is [batch, seq_len, seq_len]
        scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(d_k)
        # print("score: ", scores.shape) # torch.Size([128, 38, 38])
        # normalize the scores over the last dimension
        alpha_n = F.softmax(scores, dim=-1)
        # print("alpha_n: ", alpha_n.shape) # torch.Size([128, 38, 38])
        # weighted sum of x:
        # [batch, seq_len, seq_len]·[batch, seq_len, hidden] = [batch, seq_len, hidden] -> [batch, hidden]
        context = torch.matmul(alpha_n, x).sum(1)
        return context, alpha_n
    def forward(self, x):
        # bs = len(x)
        # length = np.array([t.shape[0] for t in x])
        #
        # x, orderD = pack_sequences(x)
        # print(self.matrix[0],self.matrix[1],self.matrix[2])
        x = x.to(device)
        # Blend the three embedding channels; assumes x is
        # (batch, 3, seq_len, input_size) — TODO confirm.
        x = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        x = self.fc(x.to(device)).to(device)
        # changed_length1 = length[orderD]
        # x = pack_padded_sequence(x, changed_length1, batch_first=True)
        out,(h_n, c_n) = self.lstm(x.to(device)) # h_n/c_n: final hidden/cell states
        # out = torch.cat((h_n[-1, :, :], h_n[-2, :, :]), dim=-1)
        # out1 = unpack_sequences(rnn_out, orderD)
        # for i in range(bs):
        #     out1[i,length[i]:-1,:] = 0
        if self.att:
            query = self.dropout(out)
            # attention pooling over the sequence
            out, alpha_n = self.attention_net(out, query)
        else:
            # mean-pooling; hard-coded .cuda() (see class note)
            out = torch.mean(out,dim=1).squeeze().cuda()
        # out = out[:,-1,:]
        # fully-connected head
        # out = self.fc1(out[:,-1,:])
        # out = F.relu(out)
        # out = self.bn1(F.dropout(out, p=0.3))
        # out = self.fc2(out)
        # out = F.relu(out)
        # out = self.bn2(F.dropout(out, p=0.3))
        out = self.fc3(out)
        out = F.relu(out)
        out = self.dropout(out)
        out = self.fc4(out)
        # return F.softmax(out,dim=-1)
        return out
class MyDataset(data.Dataset):
    """Dataset of (embedding, label, smiles) triples, returned untouched.

    Tensor conversion is deliberately left to the caller / DataLoader.
    """
    def __init__(self, compound, y, smi):
        super(MyDataset, self).__init__()
        self.compound = compound
        self.y = y
        self.smi = smi
    def __getitem__(self, item):
        return self.compound[item], self.y[item], self.smi[item]
    def __len__(self):
        # FIX: stray " | Python" text (dataset-extraction residue) was fused
        # onto this return, turning it into `len(...) | Python` — a NameError
        # at runtime.  The residue is removed.
        return len(self.compound)
3D | lvqiujie/Mol2Context-vec | tasks/utils/__init__.py | .py | 0 | 0 | null | Python |
3D | lvqiujie/Mol2Context-vec | tasks/utils/util.py | .py | 6,345 | 135 | import sys
sys.path.append('./')
import numpy as np
from sklearn.model_selection import KFold
from sklearn.externals import joblib
def split_data(x, y, all_smi, k_fold, name):
    """Class-balanced 5-fold split for a binary-labelled dataset.

    Negatives and positives are K-folded separately so each fold keeps the
    original class ratio; each held-out chunk is split half/half into
    validation and test.  All five folds' indices and the class weights are
    dumped under `name`/ as a side effect.

    Args:
        x: feature array, indexable by integer-index lists.
        y: binary labels (0/1).
        all_smi: SMILES strings aligned with x/y.
        k_fold: which fold (0-4) to return.
        name: output directory for the index/weight pickles.
    Returns:
        (train_x, train_y, train_smi, val_x, val_y, val_smi,
         test_x, test_y, test_smi, weights) where weights is
        [total/n_negative, total/n_positive].
    """
    y = np.array(y)
    all_smi = np.array(all_smi)
    # FIX: KFold's shuffle/random_state are keyword-only in current
    # scikit-learn; the original positional call (KFold(5, True, 100))
    # raises a TypeError there.  Same splits as before.
    kf = KFold(n_splits=5, shuffle=True, random_state=100)
    train_index = [[] for _ in range(5)]
    val_index = [[] for _ in range(5)]
    test_index = [[] for _ in range(5)]
    negative_index = np.where(y == 0)[0]
    positive_index = np.where(y == 1)[0]
    # Split each class independently; `*_tmp` index into `class_index`.
    for class_index in (negative_index, positive_index):
        for k, (train_tmp, test_tmp) in enumerate(kf.split(class_index)):
            train_index[k].extend(class_index[train_tmp])
            num_t = int(len(test_tmp) / 2)
            val_index[k].extend(class_index[test_tmp[:num_t]])
            test_index[k].extend(class_index[test_tmp[num_t:]])
    # Inverse class frequencies, used as loss weights downstream.
    weights = [(len(negative_index) + len(positive_index)) / len(negative_index),
               (len(negative_index) + len(positive_index)) / len(positive_index)]
    # Persist every fold's indices and the weights for reuse.
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, name + '/' + str(i + 1) + '-fold-index.pkl')
    joblib.dump(weights, name + '/weights.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi,\
           val_split_x, val_split_y, val_split_smi,\
           test_split_x, test_split_y, test_split_smi, weights
def split_multi_label(x, y, smi, k_fold, name):
    """Per-task 5-fold split for a multi-label dataset, merged across tasks.

    For every task column, negatives and positives are K-folded separately
    (missing labels, i.e. NaN, belong to neither); the per-task folds are
    then merged across tasks with set-union.  All five folds' indices and
    the per-task class weights are dumped under `name`/ as a side effect.

    NOTE(review): because folds are unioned across tasks, a sample that is
    train for task A can also land in val/test for task B — confirm this
    cross-task overlap is intended before relying on the test metrics.

    Returns:
        (train_x, train_y, train_smi, val_x, val_y, val_smi,
         test_x, test_y, test_smi, weights) with weights[t] =
        [total_t/n_negative_t, total_t/n_positive_t] per task t.
    """
    y = np.array(y).astype(float)
    # y[np.where(np.isnan(y))] = 6
    all_smi = np.array(smi)
    # FIX: the original KFold(5, False, 100) passed shuffle/random_state
    # positionally (keyword-only in current scikit-learn) and set a
    # random_state with shuffle=False, which scikit-learn rejects.  With
    # shuffle disabled the state is unused, so dropping it keeps the
    # splits identical.
    kf = KFold(n_splits=5, shuffle=False)
    all_train_index = [[] for _ in range(5)]
    all_train_index_weights = [[] for _ in range(y.shape[1])]
    all_val_index = [[] for _ in range(5)]
    all_test_index = [[] for _ in range(5)]
    for task_index in range(y.shape[-1]):
        negative_index = np.where(y[:, task_index] == 0)[0]
        positive_index = np.where(y[:, task_index] == 1)[0]
        train_index = [[] for _ in range(5)]
        val_index = [[] for _ in range(5)]
        test_index = [[] for _ in range(5)]
        # Split each class independently; half of each held-out chunk
        # goes to validation, half to test.
        for class_index in (negative_index, positive_index):
            for k, (train_tmp, test_tmp) in enumerate(kf.split(class_index)):
                train_index[k].extend(class_index[train_tmp])
                num_t = int(len(test_tmp) / 2)
                val_index[k].extend(class_index[test_tmp[:num_t]])
                test_index[k].extend(class_index[test_tmp[num_t:]])
        # Inverse class frequencies for this task's loss weighting.
        all_train_index_weights[task_index] = [
            (len(negative_index) + len(positive_index)) / len(negative_index),
            (len(negative_index) + len(positive_index)) / len(positive_index)]
        # Merge this task's folds into the global folds (set-union).
        if task_index == 0:
            all_train_index = train_index
            all_val_index = val_index
            all_test_index = test_index
        else:
            all_train_index = [list(set(all_train_index[i]).union(set(t))) for i, t in enumerate(train_index)]
            all_val_index = [list(set(all_val_index[i]).union(set(t))) for i, t in enumerate(val_index)]
            all_test_index = [list(set(all_test_index[i]).union(set(t))) for i, t in enumerate(test_index)]
    # Persist every fold's indices and the per-task weights for reuse.
    for i in range(5):
        joblib.dump({"train_index": all_train_index[i],
                     "val_index": all_val_index[i],
                     "test_index": all_test_index[i],
                     }, name + '/' + str(i + 1) + '-fold-index.pkl')
    joblib.dump(all_train_index_weights, name + '/weights.pkl')
    train_split_x = x[all_train_index[k_fold]]
    train_split_y = y[all_train_index[k_fold]]
    train_split_smi = all_smi[all_train_index[k_fold]]
    val_split_x = x[all_val_index[k_fold]]
    val_split_y = y[all_val_index[k_fold]]
    val_split_smi = all_smi[all_val_index[k_fold]]
    test_split_x = x[all_test_index[k_fold]]
    test_split_y = y[all_test_index[k_fold]]
    test_split_smi = all_smi[all_test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi,\
           val_split_x, val_split_y, val_split_smi,\
           test_split_x, test_split_y, test_split_smi, all_train_index_weights
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/FreeSolv/train2.py | .py | 35,119 | 793 | from rdkit import Chem
import torch
import os
import torch.nn as nn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
# from paper_data.plot_morgan import main
import numpy as np
import seaborn as sns
import math
import pickle
import random
from rdkit.Chem import MolFromSmiles
from AttentiveFP.Featurizer import *
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.optim as optim
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from AttentiveFP import Fingerprint, Fingerprint_viz, save_smiles_dicts, get_smiles_dicts, get_smiles_array, moltosvg_highlight
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyper-parameters and featurization for the FreeSolv regression task.
p_dropout = 0.2
fingerprint_dim = 200
# also known as l2_regularization_lambda
weight_decay = 5
learning_rate = 2.5
# for regression model
output_units_num = 1
radius = 2
T = 2
smilesList = ['CC']
# Node-degree buckets used by MolGraph / the padded neighbor tables below.
degrees = [0, 1, 2, 3, 4, 5]
raw_filename = "FreeSolv/SAMPL.csv"
feature_filename = raw_filename.replace('.csv', '.pickle')
filename = raw_filename.replace('.csv', '')
prefix_filename = raw_filename.split('/')[-1].replace('.csv', '')
smiles_tasks_df = pd.read_csv(raw_filename)
smilesList = smiles_tasks_df.smiles.values
print("number of all smiles: ", len(smilesList))
# Canonicalize every SMILES; unparsable ones are reported and dropped.
atom_num_dist = []
remained_smiles = []
canonical_smiles_list = []
for smiles in smilesList:
    try:
        mol = Chem.MolFromSmiles(smiles)
        atom_num_dist.append(len(mol.GetAtoms()))
        remained_smiles.append(smiles)
        canonical_smiles_list.append(Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True))
    except:
        print(smiles, "######3")
        pass
# NOTE(review): this overwrites the .pickle path computed above, and the
# cache-loading branch is commented out, so features are always rebuilt.
feature_filename = 'FreeSolv/SAMPL2'
# if os.path.isfile(feature_filename):
#     print("NO esol/delaney-processed.pickle")
#     feature_dicts = pickle.load(open(feature_filename, "rb"))
# else:
feature_dicts = save_smiles_dicts(smilesList, feature_filename)
# Probe one molecule to discover the per-atom / per-bond feature widths.
x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(
    [canonical_smiles_list[0]], feature_dicts)
num_atom_features = x_atom.shape[-1]
num_bond_features = x_bonds.shape[-1]
class MolGraph(object):
    """Typed molecular graph with node buckets keyed by ntype
    ('atom', 'bond', 'molecule').  Relies on the module-level `degrees`
    list and the sibling Node class."""
    def __init__(self):
        self.nodes = {} # dict of lists of nodes, keyed by node type
    def new_node(self, ntype, features=None, rdkit_ix=None):
        """Create a Node, register it under ntype, and return it."""
        new_node = Node(ntype, features, rdkit_ix)
        self.nodes.setdefault(ntype, []).append(new_node)
        return new_node
    def add_subgraph(self, subgraph):
        """Merge another MolGraph's node buckets into this one."""
        old_nodes = self.nodes
        new_nodes = subgraph.nodes
        for ntype in set(old_nodes.keys()) | set(new_nodes.keys()):
            old_nodes.setdefault(ntype, []).extend(new_nodes.get(ntype, []))
    def sort_nodes_by_degree(self, ntype):
        """Reorder self.nodes[ntype] by same-type neighbor count and also
        bucket them under (ntype, degree) keys."""
        nodes_by_degree = {i : [] for i in degrees}
        for node in self.nodes[ntype]:
            nodes_by_degree[len(node.get_neighbors(ntype))].append(node)
        new_nodes = []
        for degree in degrees:
            cur_nodes = nodes_by_degree[degree]
            self.nodes[(ntype, degree)] = cur_nodes
            new_nodes.extend(cur_nodes)
        self.nodes[ntype] = new_nodes
    def feature_array(self, ntype):
        """Stack the feature vectors of all ntype nodes into one array."""
        assert ntype in self.nodes
        return np.array([node.features for node in self.nodes[ntype]])
    def rdkit_ix_array(self):
        """Original RDKit atom indices, in the current 'atom' node order."""
        return np.array([node.rdkit_ix for node in self.nodes['atom']])
    def neighbor_list(self, self_ntype, neighbor_ntype):
        """For each self_ntype node, the positional indices of its
        neighbor_ntype neighbors within self.nodes[neighbor_ntype]."""
        assert self_ntype in self.nodes and neighbor_ntype in self.nodes
        neighbor_idxs = {n : i for i, n in enumerate(self.nodes[neighbor_ntype])}
        return [[neighbor_idxs[neighbor]
                 for neighbor in self_node.get_neighbors(neighbor_ntype)]
                for self_node in self.nodes[self_ntype]]
class Node(object):
    """A typed graph node carrying a feature vector and its RDKit index."""
    __slots__ = ['ntype', 'features', '_neighbors', 'rdkit_ix']
    def __init__(self, ntype, features, rdkit_ix):
        self.ntype = ntype
        self.features = features
        self._neighbors = []
        self.rdkit_ix = rdkit_ix
    def add_neighbors(self, neighbor_list):
        # Adjacency is symmetric: register the edge on both endpoints.
        for other in neighbor_list:
            self._neighbors.append(other)
            other._neighbors.append(self)
    def get_neighbors(self, ntype):
        """All neighbors whose node type equals ntype, in insertion order."""
        return [nb for nb in self._neighbors if nb.ntype == ntype]
def graph_from_smiles(smiles):
    """Build a MolGraph from a SMILES string.

    Creates one 'atom' node per atom (featurized by atom_features), one
    'bond' node per bond (featurized by bond_features) linked to both
    endpoint atoms, plus a single 'molecule' node adjacent to every atom.
    Raises ValueError when RDKit cannot parse the SMILES.
    """
    graph = MolGraph()
    mol = MolFromSmiles(smiles)
    if not mol:
        raise ValueError("Could not parse SMILES string:", smiles)
    atoms_by_rd_idx = {}
    for atom in mol.GetAtoms():
        new_atom_node = graph.new_node('atom', features=atom_features(atom), rdkit_ix=atom.GetIdx())
        atoms_by_rd_idx[atom.GetIdx()] = new_atom_node
    for bond in mol.GetBonds():
        atom1_node = atoms_by_rd_idx[bond.GetBeginAtom().GetIdx()]
        atom2_node = atoms_by_rd_idx[bond.GetEndAtom().GetIdx()]
        new_bond_node = graph.new_node('bond', features=bond_features(bond))
        # The bond node neighbors both atoms; the atoms also neighbor
        # each other directly (atom-atom adjacency).
        new_bond_node.add_neighbors((atom1_node, atom2_node))
        atom1_node.add_neighbors((atom2_node,))
    mol_node = graph.new_node('molecule')
    mol_node.add_neighbors(graph.nodes['atom'])
    return graph
def array_rep_from_smiles(molgraph):
    """Flatten a MolGraph into plain arrays so the graph object itself
    can be freed as early as possible."""
    rep = {
        'atom_features': molgraph.feature_array('atom'),
        'bond_features': molgraph.feature_array('bond'),
        'atom_list': molgraph.neighbor_list('molecule', 'atom'),
        'rdkit_ix': molgraph.rdkit_ix_array(),
    }
    # Per-degree neighbor tables, keyed by (kind, degree).
    for deg in (0, 1, 2, 3, 4, 5):
        rep[('atom_neighbors', deg)] = np.array(
            molgraph.neighbor_list(('atom', deg), 'atom'), dtype=int)
        rep[('bond_neighbors', deg)] = np.array(
            molgraph.neighbor_list(('atom', deg), 'bond'), dtype=int)
    return rep
def gen_descriptor_data(smilesList):
    """Map canonical SMILES -> array-rep fingerprint features.

    Each SMILES is canonicalized, turned into a MolGraph, degree-sorted,
    and flattened via array_rep_from_smiles.  Molecules that fail to
    parse or featurize are skipped with a console warning.
    """
    smiles_to_fingerprint_array = {}
    for i, smiles in enumerate(smilesList):
        # if i > 5:
        #     print("Due to the limited computational resource, submission with more than 5 molecules will not be processed")
        #     break
        try:
            # Canonicalization is inside the try so an unparsable SMILES
            # (MolFromSmiles -> None) is skipped rather than crashing;
            # originally it sat outside the handler.
            smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True)
            molgraph = graph_from_smiles(smiles)
            molgraph.sort_nodes_by_degree('atom')
            arrayrep = array_rep_from_smiles(molgraph)
            smiles_to_fingerprint_array[smiles] = arrayrep
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print(smiles,"%%%%%%%%")
    return smiles_to_fingerprint_array
def save_smiles_dicts(smilesList, filename):
    """Featurize SMILES into fixed-size zero-padded arrays and pickle them.

    First pass finds the largest atom/bond counts; second pass pads every
    molecule to that size.  One extra all-zero row is reserved per array,
    and neighbor slots default to that sentinel row's index.  The result
    is pickled to `filename + '.pickle'` and also returned.

    NOTE(review): this redefines the save_smiles_dicts imported from
    AttentiveFP at the top of the file — confirm the shadowing is intended.

    Returns:
        dict with keys smiles_to_atom_mask / _atom_info / _bond_info /
        _atom_neighbors / _bond_neighbors / smiles_to_rdkit_list, each a
        {canonical_smiles: ndarray-or-list} mapping.
    """
    # -- pass 1: padding sizes ------------------------------------------
    max_atom_len = 0
    max_bond_len = 0
    num_atom_features = 0
    num_bond_features = 0
    smiles_to_rdkit_list = {}

    smiles_to_fingerprint_features = gen_descriptor_data(smilesList)

    for smiles, arrayrep in smiles_to_fingerprint_features.items():
        smiles_to_rdkit_list[smiles] = arrayrep['rdkit_ix']
        atom_len, num_atom_features = arrayrep['atom_features'].shape
        bond_len, num_bond_features = arrayrep['bond_features'].shape
        max_atom_len = max(max_atom_len, atom_len)
        max_bond_len = max(max_bond_len, bond_len)

    # Reserve one extra zero row; unused neighbor slots point at it.
    max_atom_index_num = max_atom_len
    max_bond_index_num = max_bond_len
    max_atom_len += 1
    max_bond_len += 1

    # -- pass 2: build the padded per-molecule arrays -------------------
    smiles_to_atom_info = {}
    smiles_to_bond_info = {}
    smiles_to_atom_neighbors = {}
    smiles_to_bond_neighbors = {}
    smiles_to_atom_mask = {}

    degrees = [0, 1, 2, 3, 4, 5]
    for smiles, arrayrep in smiles_to_fingerprint_features.items():
        mask = np.zeros((max_atom_len))
        atoms = np.zeros((max_atom_len, num_atom_features))
        bonds = np.zeros((max_bond_len, num_bond_features))

        # Both neighbor tables are row-per-atom; initialize every slot to
        # the padded sentinel index.
        atom_neighbors = np.zeros((max_atom_len, len(degrees)))
        bond_neighbors = np.zeros((max_atom_len, len(degrees)))
        atom_neighbors.fill(max_atom_index_num)
        bond_neighbors.fill(max_bond_index_num)

        for i, feature in enumerate(arrayrep['atom_features']):
            mask[i] = 1.0  # 1.0 marks a real (non-padding) atom
            atoms[i] = feature
        for j, feature in enumerate(arrayrep['bond_features']):
            bonds[j] = feature

        # Atoms are degree-sorted, so filling bucket by bucket walks the
        # rows in atom order.
        atom_neighbor_count = 0
        bond_neighbor_count = 0
        for degree in degrees:
            for degree_array in arrayrep[('atom_neighbors', degree)]:
                for j, value in enumerate(degree_array):
                    atom_neighbors[atom_neighbor_count, j] = value
                atom_neighbor_count += 1
            for degree_array in arrayrep[('bond_neighbors', degree)]:
                for j, value in enumerate(degree_array):
                    bond_neighbors[bond_neighbor_count, j] = value
                bond_neighbor_count += 1

        smiles_to_atom_info[smiles] = atoms
        smiles_to_bond_info[smiles] = bonds
        smiles_to_atom_neighbors[smiles] = atom_neighbors
        smiles_to_bond_neighbors[smiles] = bond_neighbors
        smiles_to_atom_mask[smiles] = mask

    # Free the raw per-molecule features before pickling.
    del smiles_to_fingerprint_features

    # FIX: dropped the dead `feature_dicts = {}` double assignment and the
    # unused working_atom_list/working_bond_list locals of the original.
    feature_dicts = {
        'smiles_to_atom_mask': smiles_to_atom_mask,
        'smiles_to_atom_info': smiles_to_atom_info,
        'smiles_to_bond_info': smiles_to_bond_info,
        'smiles_to_atom_neighbors': smiles_to_atom_neighbors,
        'smiles_to_bond_neighbors': smiles_to_bond_neighbors,
        'smiles_to_rdkit_list': smiles_to_rdkit_list
    }
    # FIX: the original `pickle.dump(..., open(...))` leaked the file handle.
    with open(filename + '.pickle', "wb") as fh:
        pickle.dump(feature_dicts, fh)
    print('feature dicts file saved as ' + filename + '.pickle')
    return feature_dicts
def split_data(x, y, all_smi, lens, k_fold):
    """Split (x, y, smi, lens) into train/validation/test subsets via K-fold CV.

    Each KFold test fold is halved: the first half becomes the validation set,
    the second half the test set. Per-fold index dicts are dumped to
    'FreeSolv/<i>-fold-index.pkl' as a side effect.

    Args:
        x: feature array indexable by an integer index array.
        y: labels; converted to a float64 ndarray.
        all_smi: SMILES strings aligned with x.
        lens: per-molecule sequence lengths aligned with x.
        k_fold: 0-based index of the fold whose split is returned.

    Returns:
        12-tuple (train_x, train_y, train_smi, train_lens,
                  val_x, val_y, val_smi, val_lens,
                  test_x, test_y, test_smi, test_lens).
    """
    y = np.array(y, dtype=np.float64)
    all_smi = np.array(all_smi)
    lens = np.array(lens)
    # Keyword arguments are required: sklearn >= 0.24 raises TypeError for
    # positional shuffle/random_state (original call was KFold(4, True, 8)).
    kf = KFold(n_splits=4, shuffle=True, random_state=8)
    train_index = [[] for _ in range(5)]
    val_index = [[] for _ in range(5)]
    test_index = [[] for _ in range(5)]
    # NOTE(review): only 4 folds are generated, so the 5th slot (and the
    # dumped '5-fold-index.pkl') stays empty — confirm whether n_splits
    # should be 5 to match the "5-Fold" comments at the call sites.
    for k, (train_tmp, test_tmp) in enumerate(kf.split(x)):
        train_index[k].extend(train_tmp)
        # First half of the held-out fold -> validation, second half -> test.
        half = int(len(test_tmp) / 2)
        val_index[k].extend(test_tmp[:half])
        test_index[k].extend(test_tmp[half:])
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, 'FreeSolv/' + str(i + 1) + '-fold-index.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    train_split_lens = lens[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    val_split_lens = lens[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    test_split_lens = lens[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi, train_split_lens,\
           val_split_x, val_split_y, val_split_smi, val_split_lens,\
           test_split_x, test_split_y, test_split_smi, test_split_lens
def get_smiles_array(smilesList, feature_dicts):
    """Look up precomputed per-molecule feature arrays for a batch of SMILES.

    Returns stacked numpy arrays of atom features, bond features, atom
    neighbor indices, bond neighbor indices and atom masks, plus the
    smiles -> rdkit-index mapping carried in *feature_dicts*.
    """
    masks, atoms, bonds = [], [], []
    atom_idx, bond_idx = [], []
    for smi in smilesList:
        masks.append(feature_dicts['smiles_to_atom_mask'][smi])
        atoms.append(feature_dicts['smiles_to_atom_info'][smi])
        bonds.append(feature_dicts['smiles_to_bond_info'][smi])
        atom_idx.append(feature_dicts['smiles_to_atom_neighbors'][smi])
        bond_idx.append(feature_dicts['smiles_to_bond_neighbors'][smi])
    return (np.asarray(atoms), np.asarray(bonds), np.asarray(atom_idx),
            np.asarray(bond_idx), np.asarray(masks),
            feature_dicts['smiles_to_rdkit_list'])
class Fingerprint(nn.Module):
    """Attentive-FP style graph attention network over molecule graphs.

    Atom embeddings are refined for ``radius`` message-passing rounds with
    per-round attention + GRU updates; a molecule embedding is then refined
    for ``T`` rounds of atom-over-molecule attention with a shared GRU.
    NOTE(review): forward() casts masks with torch.cuda.FloatTensor, so a
    CUDA device is required — confirm before running on CPU.
    """
    def __init__(self, radius, T, input_feature_dim, input_bond_dim, \
                 fingerprint_dim, output_units_num, p_dropout):
        """radius: atom message-passing rounds; T: molecule attention rounds;
        the remaining arguments size the linear layers."""
        super(Fingerprint, self).__init__()
        # graph attention for atom embedding
        self.atom_fc = nn.Linear(input_feature_dim, fingerprint_dim)
        self.neighbor_fc = nn.Linear(input_feature_dim + input_bond_dim, fingerprint_dim)
        # One GRU / align / attend module per message-passing round.
        self.GRUCell = nn.ModuleList([nn.GRUCell(fingerprint_dim, fingerprint_dim) for r in range(radius)])
        self.align = nn.ModuleList([nn.Linear(2 * fingerprint_dim, 1) for r in range(radius)])
        self.attend = nn.ModuleList([nn.Linear(fingerprint_dim, fingerprint_dim) for r in range(radius)])
        # graph attention for molecule embedding (parameters shared across the T rounds)
        self.mol_GRUCell = nn.GRUCell(fingerprint_dim, fingerprint_dim)
        self.mol_align = nn.Linear(2 * fingerprint_dim, 1)
        self.mol_attend = nn.Linear(fingerprint_dim, fingerprint_dim)
        # you may alternatively assign a different set of parameter in each attentive layer for molecule embedding like in atom embedding process.
        # self.mol_GRUCell = nn.ModuleList([nn.GRUCell(fingerprint_dim, fingerprint_dim) for t in range(T)])
        # self.mol_align = nn.ModuleList([nn.Linear(2*fingerprint_dim,1) for t in range(T)])
        # self.mol_attend = nn.ModuleList([nn.Linear(fingerprint_dim, fingerprint_dim) for t in range(T)])
        self.dropout = nn.Dropout(p=p_dropout)
        self.output = nn.Linear(fingerprint_dim, output_units_num)
        self.radius = radius
        self.T = T
    def forward(self, atom_list, bond_list, atom_degree_list, bond_degree_list, atom_mask):
        """Return (atom_feature, mol_prediction, mol_feature).

        atom_mask marks real atoms (1.0) vs padding (0.0); padded neighbor
        slots point at index mol_length - 1 (the zero-padded last atom).
        """
        atom_mask = atom_mask.unsqueeze(2)
        batch_size, mol_length, num_atom_feat = atom_list.size()
        atom_feature = F.leaky_relu(self.atom_fc(atom_list))
        # Gather each atom's neighboring bond / atom features via the degree lists.
        bond_neighbor = [bond_list[i][bond_degree_list[i]] for i in range(batch_size)]
        bond_neighbor = torch.stack(bond_neighbor, dim=0)
        atom_neighbor = [atom_list[i][atom_degree_list[i]] for i in range(batch_size)]
        atom_neighbor = torch.stack(atom_neighbor, dim=0)
        # then concatenate them
        neighbor_feature = torch.cat([atom_neighbor, bond_neighbor], dim=-1)
        neighbor_feature = F.leaky_relu(self.neighbor_fc(neighbor_feature))
        # generate mask to eliminate the influence of blank atoms
        # (neighbor index mol_length - 1 denotes a padding slot)
        attend_mask = atom_degree_list.clone()
        attend_mask[attend_mask != mol_length - 1] = 1
        attend_mask[attend_mask == mol_length - 1] = 0
        attend_mask = attend_mask.type(torch.cuda.FloatTensor).unsqueeze(-1)
        softmax_mask = atom_degree_list.clone()
        softmax_mask[softmax_mask != mol_length - 1] = 0
        softmax_mask[softmax_mask == mol_length - 1] = -9e8 # make the softmax value extremly small
        softmax_mask = softmax_mask.type(torch.cuda.FloatTensor).unsqueeze(-1)
        batch_size, mol_length, max_neighbor_num, fingerprint_dim = neighbor_feature.shape
        # Round 0 of atom attention: score each (atom, neighbor) pair.
        atom_feature_expand = atom_feature.unsqueeze(-2).expand(batch_size, mol_length, max_neighbor_num,
                                                                fingerprint_dim)
        feature_align = torch.cat([atom_feature_expand, neighbor_feature], dim=-1)
        align_score = F.leaky_relu(self.align[0](self.dropout(feature_align)))
        # print(attention_weight)
        align_score = align_score + softmax_mask
        attention_weight = F.softmax(align_score, -2)
        # print(attention_weight)
        attention_weight = attention_weight * attend_mask
        # print(attention_weight)
        neighbor_feature_transform = self.attend[0](self.dropout(neighbor_feature))
        # print(features_neighbor_transform.shape)
        # Attention-weighted sum over neighbors -> per-atom context vector.
        context = torch.sum(torch.mul(attention_weight, neighbor_feature_transform), -2)
        # print(context.shape)
        context = F.elu(context)
        # GRU update of the atom embedding from its neighborhood context.
        context_reshape = context.view(batch_size * mol_length, fingerprint_dim)
        atom_feature_reshape = atom_feature.view(batch_size * mol_length, fingerprint_dim)
        atom_feature_reshape = self.GRUCell[0](context_reshape, atom_feature_reshape)
        atom_feature = atom_feature_reshape.view(batch_size, mol_length, fingerprint_dim)
        # do nonlinearity
        activated_features = F.relu(atom_feature)
        # Remaining radius-1 rounds repeat the same attention + GRU update
        # with that round's own parameters.
        for d in range(self.radius - 1):
            # bonds_indexed = [bond_list[i][torch.cuda.LongTensor(bond_degree_list)[i]] for i in range(batch_size)]
            neighbor_feature = [activated_features[i][atom_degree_list[i]] for i in range(batch_size)]
            # neighbor_feature is a list of 3D tensor, so we need to stack them into a 4D tensor first
            neighbor_feature = torch.stack(neighbor_feature, dim=0)
            atom_feature_expand = activated_features.unsqueeze(-2).expand(batch_size, mol_length, max_neighbor_num,
                                                                          fingerprint_dim)
            feature_align = torch.cat([atom_feature_expand, neighbor_feature], dim=-1)
            align_score = F.leaky_relu(self.align[d + 1](self.dropout(feature_align)))
            # print(attention_weight)
            align_score = align_score + softmax_mask
            attention_weight = F.softmax(align_score, -2)
            # print(attention_weight)
            attention_weight = attention_weight * attend_mask
            # print(attention_weight)
            neighbor_feature_transform = self.attend[d + 1](self.dropout(neighbor_feature))
            # print(features_neighbor_transform.shape)
            context = torch.sum(torch.mul(attention_weight, neighbor_feature_transform), -2)
            # print(context.shape)
            context = F.elu(context)
            context_reshape = context.view(batch_size * mol_length, fingerprint_dim)
            # atom_feature_reshape = atom_feature.view(batch_size*mol_length, fingerprint_dim)
            atom_feature_reshape = self.GRUCell[d + 1](context_reshape, atom_feature_reshape)
            atom_feature = atom_feature_reshape.view(batch_size, mol_length, fingerprint_dim)
            # do nonlinearity
            activated_features = F.relu(atom_feature)
        # Initial molecule embedding: mask-weighted sum of atom embeddings.
        mol_feature = torch.sum(activated_features * atom_mask, dim=-2)
        # do nonlinearity
        activated_features_mol = F.relu(mol_feature)
        mol_softmax_mask = atom_mask.clone()
        mol_softmax_mask[mol_softmax_mask == 0] = -9e8
        mol_softmax_mask[mol_softmax_mask == 1] = 0
        mol_softmax_mask = mol_softmax_mask.type(torch.cuda.FloatTensor)
        # T rounds of atom-over-molecule attention refining mol_feature.
        for t in range(self.T):
            mol_prediction_expand = activated_features_mol.unsqueeze(-2).expand(batch_size, mol_length, fingerprint_dim)
            mol_align = torch.cat([mol_prediction_expand, activated_features], dim=-1)
            mol_align_score = F.leaky_relu(self.mol_align(mol_align))
            mol_align_score = mol_align_score + mol_softmax_mask
            mol_attention_weight = F.softmax(mol_align_score, -2)
            mol_attention_weight = mol_attention_weight * atom_mask
            # print(mol_attention_weight.shape,mol_attention_weight)
            activated_features_transform = self.mol_attend(self.dropout(activated_features))
            # aggregate embeddings of atoms in a molecule
            mol_context = torch.sum(torch.mul(mol_attention_weight, activated_features_transform), -2)
            # print(mol_context.shape,mol_context)
            mol_context = F.elu(mol_context)
            mol_feature = self.mol_GRUCell(mol_context, mol_feature)
            # print(mol_feature.shape,mol_feature)
            # do nonlinearity
            activated_features_mol = F.relu(mol_feature)
        mol_prediction = self.output(self.dropout(mol_feature))
        return atom_feature, mol_prediction, mol_feature
class LSTM(nn.Module):
    """LSTM regression head combined with a wrapped Fingerprint GNN.

    Mixes the three Mol2Context-vec embedding layers with learned weights,
    runs a 2-layer LSTM, mean-pools over time, concatenates the result with
    the Fingerprint molecule embedding, and regresses to a single value.
    """
    def __init__(self, model):
        """model: a Fingerprint instance used to produce molecule embeddings."""
        super(LSTM, self).__init__()
        # Learnable mixing weights for the three embedding layers.
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.model = model
        self.fc = nn.Linear(600, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,)
        #
        # # self.fc1 = nn.Linear(512, 1024)
        # # self.fc2 = nn.Linear(128, 1024)
        self.fc3 = nn.Linear(1024 , 512)
        # 512 LSTM summary features + 200 Fingerprint molecule features.
        self.fc4 = nn.Linear(512+ 200, 1)
        # self.fc5 = nn.Linear(200, 1)
        self.dropout = nn.Dropout(p=0.5)
    def forward(self, x, x_lens, tmp_smi):
        """Return (prediction, alpha_n, att); alpha_n and att are 0
        placeholders (the attention branch is disabled).

        NOTE(review): reads the module-level global `feature_dicts` and uses
        torch.cuda.LongTensor, so it requires CUDA and that global — confirm.
        """
        # print(self.matrix1, self.matrix2, self.matrix3)
        # bs = len(x)
        # length = np.array([t.shape[0] for t in x])
        x = x.to(device)
        # Weighted sum of the three embedding layers.
        x = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        #
        x = self.fc(x.to(device)).to(device)
        # packing
        # embed_packed = pack_padded_sequence(x, x_lens,
        #                                    batch_first=True,
        #                                    enforce_sorted=False)
        out, (hidden, cell) = self.lstm(x)  # hidden/cell are the final LSTM states
        x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
                                                                                                     feature_dicts)
        atoms_prediction, mol_prediction, mol_feature = self.model(torch.Tensor(x_atom).to(device),
                                                                   torch.Tensor(x_bonds).to(device),
                                                                   torch.cuda.LongTensor(x_atom_index),
                                                                   torch.cuda.LongTensor(x_bond_index),
                                                                   torch.Tensor(x_mask).to(device))
        # unpacking
        # out, lens = pad_packed_sequence(out, batch_first=True)
        alpha_n =0
        att =0
        # out,hidden = self.lstm(x.to(device))
        # out = torch.cat((h_n[-1, :, :], h_n[-2, :, :]), dim=-1)
        # out1 = unpack_sequences(rnn_out, orderD)
        # for i in range(bs):
        #     out1[i,length[i]:-1,:] = 0
        # Mean-pool the LSTM outputs over the time dimension.
        out_tmp = torch.mean(out, dim=1).squeeze()
        # out = out[:,-1,:]
        # fully-connected head
        out_tmp = self.fc3(out_tmp)
        out_tmp = F.leaky_relu(out_tmp)
        out_tmp = self.dropout(out_tmp)
        # Fuse LSTM summary (512) with the Fingerprint molecule embedding (200).
        out_tmp = torch.cat((out_tmp.view(-1, 512), mol_feature.view(-1, 200)), dim=1)
        out_tmp = self.fc4(out_tmp)
        # out_tmp = self.fc5(mol_feature)
        # outputs = []
        # for i, out_tmp in enumerate(out):
        #     # out_tmp = torch.mean(out_tmp[:lens[i],:], dim=0).squeeze()
        #     out_tmp = out_tmp[lens[i]-1,:]
        #     out_tmp = self.fc3(out_tmp)
        #     out_tmp = F.leaky_relu(out_tmp)
        #     out_tmp = self.dropout(out_tmp)
        #     out_tmp = self.fc4(out_tmp)
        #     outputs.append(out_tmp)
        # out = torch.stack(outputs, dim=0)
        return out_tmp, alpha_n, att
class MyDataset(data.Dataset):
    """Indexed dataset bundling compounds, labels, SMILES and lengths."""

    def __init__(self, compound, y, smi, len):
        super(MyDataset, self).__init__()
        # Raw references are kept as-is; tensor conversion happens downstream.
        self.compound = compound
        self.y = y
        self.smi = smi
        self.len = len

    def __getitem__(self, item):
        sample = (self.compound[item], self.y[item],
                  self.smi[item], self.len[item])
        return sample

    def __len__(self):
        return len(self.compound)
if __name__ == '__main__':
    # Hyper-parameters.
    input_size = 512
    num_layers = 2  # number of stacked LSTM layers
    hidden_size = 512  # number of recurrent units per layer
    learning_rate = 0.001  # optimizer learning rate
    epoch_num = 1000
    batch_size = 16
    best_loss = 100000
    test_best_loss = 100000
    weight_decay = 0
    momentum = 0.9
    b = 0.04
    seed = 188
    # Seed every RNG for reproducibility.
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # filepath = "FreeSolv/delaney.csv"
    # df = pd.read_csv(filepath, header=0, encoding="gbk")
    # Pre-computed labels, SMILES, embeddings and sequence lengths.
    y = joblib.load('FreeSolv/label.pkl')
    all_smi = np.array(joblib.load('FreeSolv/smi.pkl'))
    x = joblib.load('FreeSolv/FreeSolv_embed.pkl')
    lens = joblib.load('FreeSolv/lens.pkl')
    # 5-Fold
    train_split_x, train_split_y, train_split_smi, train_split_lens,\
    val_split_x, val_split_y, val_split_smi, val_split_lens,\
    test_split_x, test_split_y, test_split_smi, test_split_lens = split_data(x, y, all_smi, lens, 1)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi, train_split_lens)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi, val_split_lens)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi, test_split_lens)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    data_all = MyDataset(x, y, all_smi, lens)
    dataset_all = data.DataLoader(dataset=data_all, batch_size=1, shuffle=True)
    # NOTE(review): radius, T, num_atom_features, num_bond_features,
    # fingerprint_dim, output_units_num and p_dropout are not defined in this
    # file — presumably imported elsewhere; confirm.
    model = Fingerprint(radius, T, num_atom_features, num_bond_features,
                        fingerprint_dim, output_units_num, p_dropout)
    model.to(device)
    rnn = LSTM(model).to(device)
    # Optimize all LSTM (and wrapped Fingerprint) parameters at learning_rate.
    # optimizer = torch.optim.Adam(list(rnn.parameters()), lr=learning_rate, weight_decay=weight_decay)
    optimizer = torch.optim.SGD(list(rnn.parameters()),
                                lr=learning_rate, weight_decay=weight_decay,
                                momentum=momentum)
    loss_function = nn.MSELoss().to(device)
    # Training loop: train, then validate (with checkpointing) and test.
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
            optimizer.zero_grad()
            # x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
            #                                                                                              feature_dicts)
            # atoms_prediction, outputs, mol_feature = rnn(torch.Tensor(x_atom).to(device),
            #                                              torch.Tensor(x_bonds).to(device),
            #                                              torch.cuda.LongTensor(x_atom_index),
            #                                              torch.cuda.LongTensor(x_bond_index),
            #                                              torch.Tensor(x_mask).to(device))
            outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device),tmp_smi)
            # print(matrix1,matrix2,matrix3)
            # print(outputs.flatten())
            loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
            # loss = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
            avg_loss = sum_loss / (index + 1)
        print("epoch:", epoch," train " "avg_loss:", avg_loss.item())
        # # save model
        # if avg_loss < best_loss:
        #     best_loss = avg_loss
        #     PATH = 'esol/lstm_net.pth'
        #     print("train save model")
        #     torch.save(rnn.state_dict(), PATH)
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                # x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
                #                                                                                              feature_dicts)
                # atoms_prediction, outputs, mol_feature = rnn(torch.Tensor(x_atom).to(device),
                #                                              torch.Tensor(x_bonds).to(device),
                #                                              torch.cuda.LongTensor(x_atom_index),
                #                                              torch.cuda.LongTensor(x_bond_index),
                #                                              torch.Tensor(x_mask).to(device))
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
                # print(outputs.flatten())
                loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
                test_sum_loss += loss.item()
                test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch," val ", "avg_loss: ", test_avg_loss)
            # save model (checkpoint on best validation loss)
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                print("test save model")
                torch.save(rnn.state_dict(), 'esol/lstm_net.pth')
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            all_pred = []
            all_label = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                loss = 0
                # x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(
                #     tmp_smi,
                #     feature_dicts)
                # atoms_prediction, outputs, mol_feature = rnn(torch.Tensor(x_atom).to(device),
                #                                              torch.Tensor(x_bonds).to(device),
                #                                              torch.cuda.LongTensor(x_atom_index),
                #                                              torch.cuda.LongTensor(x_bond_index),
                #                                              torch.Tensor(x_mask).to(device))
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device),tmp_smi)
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                all_label.extend(y_label.cpu().numpy())
                all_pred.extend(y_pred.cpu().numpy())
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                test_sum_loss += loss.item()
                mse = mean_squared_error(all_label, all_pred)
                mae = mean_absolute_error(all_label, all_pred)
                rmse = np.sqrt(mse)
                test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " test avg_loss:", test_avg_loss
                  ," mae : ", mae
                  ," rmse : ", rmse)
            # Dump per-molecule predictions over the whole set when test RMSE
            # drops below 0.8.
            if rmse < 0.8:
                # print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
                rnn.eval()
                for index, tmp in enumerate(dataset_all):
                    tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                    # x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(
                    #     tmp_smi,
                    #     feature_dicts)
                    # atoms_prediction, outputs, mol_feature = rnn(torch.Tensor(x_atom).to(device),
                    #                                              torch.Tensor(x_bonds).to(device),
                    #                                              torch.cuda.LongTensor(x_atom_index),
                    #                                              torch.cuda.LongTensor(x_bond_index),
                    #                                              torch.Tensor(x_mask).to(device))
                    outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
                    print(outputs.cpu().detach().numpy()[0][0], tmp_y.cpu().detach().numpy()[0], tmp_smi[0])
3D | lvqiujie/Mol2Context-vec | tasks/FreeSolv/train.py | .py | 14,327 | 386 | from rdkit import Chem
import torch
import torch.nn as nn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
# from paper_data.plot_morgan import main
import numpy as np
import seaborn as sns
import math
import random
from torch.autograd import Variable
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_len(smi):
    """Return the number of atoms in the canonicalized form of *smi*.

    The SMILES is round-tripped through RDKit (parse -> canonical SMILES ->
    parse) so the count refers to the canonical molecule, matching the
    original behavior.
    """
    mol = Chem.MolFromSmiles(smi)
    smiles = Chem.MolToSmiles(mol)
    mol = Chem.MolFromSmiles(smiles)
    # GetNumAtoms() replaces building a full list of atom indices just to
    # take its length.
    return mol.GetNumAtoms()
def pack_sequences(X, order=None):
    """Zero-pad a list of (len_i, features) tensors into one block tensor.

    Sequences are placed longest-first unless *order* is supplied. Returns
    the padded (n, max_len, features) tensor and the ordering used.
    """
    seq_lengths = np.array([seq.shape[0] for seq in X])
    n_feat = X[0].shape[1]
    count = len(X)
    if order is None:
        # Sort indices by descending length.
        order = np.argsort(seq_lengths)[::-1]
    max_len = max(seq_lengths)
    padded = X[0].new(count, max_len, n_feat).zero_()
    for dst, src in enumerate(order):
        seq = X[src]
        padded[dst, :len(seq), :] = seq
    return padded, order
def unpack_sequences(X, order):
    """Pad a packed sequence and restore the original (pre-sort) ordering.

    Inverse of pack_sequences: row i of the padded output is written back
    to position order[i].
    """
    padded, lengths = pad_packed_sequence(X, batch_first=True)
    restored = torch.zeros(size=padded.size()).to(device)
    for src_row, dst_row in enumerate(order):
        restored[dst_row] = padded[src_row]
    return restored
def split_data(x, y, all_smi, lens, k_fold):
    """Split (x, y, smi, lens) into train/validation/test subsets via K-fold CV.

    Each KFold test fold is halved: the first half becomes the validation set,
    the second half the test set. Per-fold index dicts are dumped to
    'FreeSolv/<i>-fold-index.pkl' as a side effect.

    Args:
        x: feature array indexable by an integer index array.
        y: labels; converted to a float64 ndarray.
        all_smi: SMILES strings aligned with x.
        lens: per-molecule sequence lengths aligned with x.
        k_fold: 0-based index of the fold whose split is returned.

    Returns:
        12-tuple (train_x, train_y, train_smi, train_lens,
                  val_x, val_y, val_smi, val_lens,
                  test_x, test_y, test_smi, test_lens).
    """
    y = np.array(y, dtype=np.float64)
    all_smi = np.array(all_smi)
    lens = np.array(lens)
    # Keyword arguments are required: sklearn >= 0.24 raises TypeError for
    # positional shuffle/random_state (original call was KFold(4, True, 8)).
    kf = KFold(n_splits=4, shuffle=True, random_state=8)
    train_index = [[] for _ in range(5)]
    val_index = [[] for _ in range(5)]
    test_index = [[] for _ in range(5)]
    # NOTE(review): only 4 folds are generated, so the 5th slot (and the
    # dumped '5-fold-index.pkl') stays empty — confirm whether n_splits
    # should be 5 to match the "5-Fold" comments at the call sites.
    for k, (train_tmp, test_tmp) in enumerate(kf.split(x)):
        train_index[k].extend(train_tmp)
        # First half of the held-out fold -> validation, second half -> test.
        half = int(len(test_tmp) / 2)
        val_index[k].extend(test_tmp[:half])
        test_index[k].extend(test_tmp[half:])
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, 'FreeSolv/' + str(i + 1) + '-fold-index.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    train_split_lens = lens[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    val_split_lens = lens[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    test_split_lens = lens[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi, train_split_lens,\
           val_split_x, val_split_y, val_split_smi, val_split_lens,\
           test_split_x, test_split_y, test_split_smi, test_split_lens
class LSTM(nn.Module):
    """LSTM regression head over Mol2Context-vec embeddings.

    Mixes the three context-embedding layers with learned weights, projects
    to 1024-d, runs a 2-layer LSTM, mean-pools over time and regresses to a
    single value.
    """
    def __init__(self):
        super(LSTM, self).__init__()
        # Learnable mixing weights for the three embedding layers.
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.fc = nn.Linear(600, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True)
        # self.fc1 = nn.Linear(512, 1024)
        # self.fc2 = nn.Linear(128, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 1)
        self.dropout = nn.Dropout(p=0.5)
        # hidden/cell must be re-initialized for every batch, otherwise the
        # model would carry over the previous batch's hidden/cell state.
        # NOTE(review): self.hidden is created here but never passed to
        # self.lstm in forward(), so the LSTM always starts from zeros.
        self.hidden = self.init_hidden()
    def init_hidden(self):
        """Build an initial (hidden, cell) pair for the LSTM.

        There is no hidden state at the start, so one is created up front;
        shape is (nb_layers, batch_size, hidden_dim).
        NOTE(review): layer count (2), batch size (16) and hidden dim (1024)
        are hard-coded here — confirm they match the DataLoader batch size.
        """
        hidden = torch.randn(2,
                             16,
                             1024)
        cell = torch.randn(2,
                           16,
                           1024)
        # Random initialization is used instead of zeros; which works better
        # has not been investigated.
        # hidden = torch.randn(self.nb_layres,
        #                      self.batch_size,
        #                      self.hidden_dim)
        # cell = torch.randn(self.nb_layers,
        #                    self.batch_size,
        #                    self.hidden_dim)
        return (hidden.to(device), cell.to(device))
    def attention_net(self, x, query, mask=None):
        """Scaled dot-product self-attention over the LSTM outputs.

        Returns (context, alpha_n, att): the attended context vectors, the
        attention matrix, and per-step sigmoid attention scores.
        """
        d_k = query.size(-1)  # d_k is the dimensionality of the query
        # query:[batch, seq_len, hidden_dim*2], x.t:[batch, hidden_dim*2, seq_len]
        # print("query: ", query.shape, x.transpose(1, 2).shape) # torch.Size([128, 38, 128]) torch.Size([128, 128, 38])
        # scoring: scores has shape [batch, seq_len, seq_len]
        scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(d_k)
        # print("score: ", scores.shape)  # torch.Size([128, 38, 38])
        # normalize the scores over the last dimension
        alpha_n = F.softmax(scores, dim=-1)
        # print("alpha_n: ", alpha_n.shape) # torch.Size([128, 38, 38])
        # weighted sum of x
        # [batch, seq_len, seq_len]·[batch,seq_len, hidden_dim*2] = [batch,seq_len,hidden_dim*2] -> [batch, hidden_dim*2]
        context = torch.matmul(alpha_n, x).sum(1)
        att = torch.matmul(x, context.unsqueeze(2))/ math.sqrt(d_k)
        att = torch.sigmoid(att.squeeze())
        return context, alpha_n, att
    def forward(self, x, x_lens):
        """Regress one value per molecule; returns (out, alpha_n, att) where
        alpha_n and att are 0 placeholders (attention branch is disabled)."""
        # bs = len(x)
        # length = np.array([t.shape[0] for t in x])
        #
        x = x.to(device)
        # Weighted sum of the three embedding layers.
        out = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        out = self.fc(out)
        # out = F.leaky_relu(out)
        # out = self.dropout(out)
        # packing
        # out = pack_padded_sequence(out, x_lens,
        #                            batch_first=True,
        #                            enforce_sorted=False)
        out, (hidden, cell) = self.lstm(out)  # hidden/cell are the final LSTM states
        # query = self.dropout(out)
        # #
        # # attention mechanism (currently disabled)
        # attn_output, alpha_n, att = self.attention_net(out, query)
        # unpacking
        # out, lens = pad_packed_sequence(out, batch_first=True)
        alpha_n =0
        att =0
        # out,hidden = self.lstm(x.to(device))
        # out = torch.cat((h_n[-1, :, :], h_n[-2, :, :]), dim=-1)
        # out1 = unpack_sequences(rnn_out, orderD)
        # for i in range(bs):
        #     out1[i,length[i]:-1,:] = 0
        # Mean-pool the LSTM outputs over the time dimension.
        out = torch.mean(out, dim=1).squeeze()
        # out = out[:,-1,:]
        # fully-connected head
        # out = self.fc1(out[:,-1,:])
        # out = F.leaky_relu(out)
        # out = F.dropout(out, p=0.3)
        # out = self.fc2(out)
        # out = F.leaky_relu(out)
        # out = F.dropout(out, p=0.3)
        out = self.fc3(out)
        out = F.leaky_relu(out)
        out = self.dropout(out)
        out = self.fc4(out)
        # outputs = []
        # for i, out_tmp in enumerate(out):
        #     # out_tmp = torch.mean(out_tmp[:lens[i],:], dim=0).squeeze()
        #     out_tmp = out_tmp[lens[i]-1,:]
        #     out_tmp = self.fc3(out_tmp)
        #     out_tmp = F.leaky_relu(out_tmp)
        #     out_tmp = self.dropout(out_tmp)
        #     out_tmp = self.fc4(out_tmp)
        #     outputs.append(out_tmp)
        # out = torch.stack(outputs, dim=0)
        return out, alpha_n, att
class MyDataset(data.Dataset):
    """Dataset yielding (compound, label, smiles, length) tuples by index."""

    def __init__(self, compound, y, smi, len):
        super(MyDataset, self).__init__()
        # Store the parallel sequences without conversion; the DataLoader
        # collates them into tensors later.
        self.compound = compound
        self.y = y
        self.smi = smi
        self.len = len

    def __getitem__(self, item):
        return (self.compound[item],
                self.y[item],
                self.smi[item],
                self.len[item])

    def __len__(self):
        return len(self.compound)
if __name__ == '__main__':
    # Hyper-parameters.
    input_size = 512
    num_layers = 2  # number of stacked LSTM layers
    hidden_size = 512  # number of recurrent units per layer
    learning_rate = 0.01  # optimizer learning rate
    epoch_num = 2000
    batch_size = 32
    best_loss = 10000
    test_best_loss = 10000
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.5
    seed = 199
    # Seed every RNG for reproducibility.
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # filepath = "FreeSolv/delaney.csv"
    # df = pd.read_csv(filepath, header=0, encoding="gbk")
    # Pre-computed labels, SMILES, embeddings and sequence lengths.
    y = joblib.load('FreeSolv/label.pkl')
    all_smi = np.array(joblib.load('FreeSolv/smi.pkl'))
    x = joblib.load('FreeSolv/FreeSolv_embed.pkl')
    lens = joblib.load('FreeSolv/lens.pkl')
    # 5-Fold
    # 5-Fold
    train_split_x, train_split_y, train_split_smi, train_split_lens,\
    val_split_x, val_split_y, val_split_smi, val_split_lens,\
    test_split_x, test_split_y, test_split_smi, test_split_lens = split_data(x, y, all_smi, lens, 1)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi, train_split_lens)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi, val_split_lens)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi, test_split_lens)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM().to(device)
    # Optimize rnn.parameters with SGD at learning_rate.
    # optimizer = torch.optim.Adam(list(rnn.parameters()), lr=learning_rate, weight_decay = weight_decay)
    optimizer = torch.optim.SGD(rnn.parameters(),
                                lr=learning_rate, weight_decay = weight_decay,
                                momentum = momentum)
    loss_function = nn.MSELoss().to(device)
    # Training loop: train, then validate (with checkpointing) and test.
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
            optimizer.zero_grad()
            outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device))
            # print(matrix1,matrix2,matrix3)
            # print(outputs.flatten())
            loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
            # loss = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
            avg_loss = sum_loss / (index + 1)
        print("epoch:", epoch," train " "avg_loss:", avg_loss.item())
        # # save model
        # if avg_loss < best_loss:
        #     best_loss = avg_loss
        #     PATH = 'FreeSolv/lstm_net.pth'
        #     print("train save model")
        #     torch.save(rnn.state_dict(), PATH)
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device))
                # print(outputs.flatten())
                loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
                test_sum_loss += loss.item()
                test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch," val ", "avg_loss: ", test_avg_loss)
            # save model (checkpoint on best validation loss)
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                print("test save model")
                torch.save(rnn.state_dict(), 'FreeSolv/lstm_net.pth')
            att_flag = False
            # if test_avg_loss < 0.5:
            #     att_flag = True
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            all_pred = []
            all_label = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                loss = 0
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device))
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                all_label.extend(y_label.cpu().numpy())
                all_pred.extend(y_pred.cpu().numpy())
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                test_sum_loss += loss.item()
                mse = mean_squared_error(all_label, all_pred)
                rmse = np.sqrt(mse)
                test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " test avg_loss:", test_avg_loss
                  ," rmse : ", rmse)
3D | lvqiujie/Mol2Context-vec | tasks/FreeSolv/test.py | .py | 1,531 | 40 | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tasks.FreeSolv.train import LSTM, MyDataset
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
import numpy as np
# Hyper-parameters for stand-alone evaluation of the saved FreeSolv model.
input_size = 512
num_layers = 2  # number of stacked LSTM layers
hidden_size = 512  # number of recurrent units per layer
learning_rate = 0.02  # optimizer learning rate (unused at inference)
epoch_num = 1000
batch_size = 1
best_loss = 10000
test_best_loss = 1000
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# filepath="FreeSolv/delaney.csv"
# df = pd.read_csv(filepath, header=0, encoding="gbk")
# Pre-computed labels, SMILES and embeddings.
y = joblib.load('FreeSolv/label.pkl')
all_smi = np.array(joblib.load('FreeSolv/smi.pkl'))
x = joblib.load('FreeSolv/FreeSolv_embed.pkl')
# NOTE(review): MyDataset as defined in train.py takes four arguments
# (compound, y, smi, len) — this three-argument call looks out of date; confirm.
data_test = MyDataset(x, y,all_smi)
dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True, drop_last=True)
# Restore the trained model weights.
rnn = LSTM().to(device)
rnn_dict = torch.load('FreeSolv/lstm_net.pth')
task_matrix = torch.load('FreeSolv/task_matrix.pth')
# print(task_matrix[0], task_matrix[1], task_matrix[2])
rnn.load_state_dict(rnn_dict)
rnn.cuda()
rnn.eval()
# NOTE(review): LSTM.forward in train.py expects (x, x_lens); passing
# task_matrix as the second argument looks inconsistent — confirm.
for tmp_x, tmp_y, tmp_smi in dataset_test:
    out, alpha_n, att_n = rnn(tmp_x, task_matrix)
    print(tmp_smi[0],out.cpu().detach().numpy()[0], tmp_y.cpu().detach().numpy()[0], )
3D | lvqiujie/Mol2Context-vec | tasks/clintox/clintox_train.py | .py | 12,871 | 262 | import os
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import random
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from tasks.utils.util import *
from tasks.utils.model import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    # ---- hyper-parameters ----
    input_size = 512
    hidden_size = 512 # number of recurrent units in the RNN
    learning_rate = 0.001 # optimizer learning rate
    epoch_num = 200
    batch_size = 32
    best_loss = 10000
    test_best_loss = 10000 # best validation loss so far; gates checkpointing
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.6 # flooding level b for the (commented-out) flooding trick below
    seed = 188
    # seed every RNG source for reproducibility
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # pre-computed labels / SMILES / embeddings (produced by get_clintox_data)
    y = joblib.load('clintox/label.pkl')
    all_smi = joblib.load('clintox/smi.pkl')
    x = joblib.load('clintox/clintox_embed.pkl')
    print("data len is ", x.shape[0])
    tasks = ["FDA_APPROVED", "CT_TOX"]
    # 5-fold split; fold index 3 is used here
    train_split_x, train_split_y, train_split_smi, \
    val_split_x, val_split_y, val_split_smi, \
    test_split_x, test_split_y, test_split_smi, weights = split_multi_label(x, y, all_smi, 3, 'clintox')
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM(len(tasks), task_type="muti", input_size=300).to(device)
    # ---- optimizer and per-task weighted losses ----
    # optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate, weight_decay=weight_decay,
    #                             momentum=momentum)
    optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # optimizer = torch.optim.RMSprop(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay)
    # one weighted CrossEntropyLoss per task; weights are inverse class frequencies
    loss_function = [nn.CrossEntropyLoss(torch.Tensor(weight).to(device), reduction='mean') for weight in weights]
    # loss_function = nn.BCELoss()
    # loss_function = nn.BCEWithLogitsLoss()
    # loss_function = FocalLoss(alpha=1 / train_weights[0])
    # ---- training loop follows ----
for epoch in range(epoch_num):
avg_loss = 0
sum_loss = 0
rnn.train()
y_true_task = {}
y_pred_task = {}
y_pred_task_score = {}
for index, tmp in enumerate(dataset_train):
tmp_compound, tmp_y, tmp_smi = tmp
optimizer.zero_grad()
outputs = rnn(tmp_compound.to(device))
loss = 0
for i in range(len(tasks)):
validId = np.where((tmp_y[:,i].cpu().numpy() == 0) | (tmp_y[:,i].cpu().numpy() == 1))[0]
if len(validId) == 0:
continue
# print(outputs.shape)
y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
y_label = tmp_y[:,i][torch.tensor(validId)].long().to(device)
# y_pred = torch.sigmoid(y_pred).view(-1)
# y_label = F.one_hot(y_label, 2).float().to(device)
loss += loss_function[i](y_pred, y_label)
pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
# pred_lable = np.zeros_like(y_pred.cpu().detach().numpy(), dtype=int)
# pred_lable[np.where(np.asarray(y_pred.cpu().detach().numpy()) > 0.5)] = 1
try:
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred)
except:
y_true_task[i] = []
y_pred_task[i] = []
# y_pred_task_score[i] = []
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred.cpu().detach().numpy())
# loss = (loss - b).abs() + b
loss.backward()
optimizer.step()
sum_loss += loss
# print("epoch:", epoch, "index: ", index,"loss:", loss.item())
avg_loss = sum_loss / (index + 1)
# acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
# recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
# specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(len(tasks))]
print("epoch:", epoch," train " "avg_loss:", avg_loss.item())
with torch.no_grad():
rnn.eval()
test_avg_loss = 0
test_sum_loss = 0
y_true_task = {}
y_pred_task = {}
y_pred_task_score = {}
for index, tmp in enumerate(dataset_val):
tmp_compound, tmp_y, tmp_smi = tmp
loss = 0
outputs = rnn(tmp_compound)
# out_label = F.softmax(outputs, dim=1)
# pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
# pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
# y_pred.extend(pred)
# y_pred_score.extend(pred_score)
for i in range(len(tasks)):
validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
if len(validId) == 0:
continue
y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
y_label = tmp_y[:, i][torch.tensor(validId)].long().to(device)
# y_pred = torch.sigmoid(y_pred).view(-1)
# y_label = F.one_hot(y_label, 2).float().to(device)
loss += loss_function[i](y_pred, y_label)
pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
# pred_lable = np.zeros_like(y_pred.cpu().detach().numpy(), dtype=int)
# pred_lable[np.where(np.asarray(y_pred.cpu().detach().numpy()) > 0.5)] = 1
try:
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred)
except:
y_true_task[i] = []
y_pred_task[i] = []
# y_pred_task_score[i] = []
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred.cpu().detach().numpy())
test_sum_loss += loss.item()
test_avg_loss = test_sum_loss / (index + 1)
trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task[i])[1],
precision_recall_curve(y_true_task[i], y_pred_task[i])[0]) for i in
range(len(tasks))]
# acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
# recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
# specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(len(tasks))]
print("epoch:", epoch, " val " "avg_loss:", test_avg_loss,
# "acc: ", np.array(acc).mean(),
# "recall: ", np.array(recall).mean(),
# "specificity: ", np.array(specificity).mean(),
# " val_auc: ", trn_roc,
" val_auc: ", np.array(trn_roc).mean(),
# " val_pr: ", trn_prc,
" val_pr: ", np.array(trn_prc).mean())
# 保存模型
if test_avg_loss < test_best_loss:
test_best_loss = test_avg_loss
PATH = 'clintox/lstm_net.pth'
print("test save model")
torch.save(rnn.state_dict(), PATH)
with torch.no_grad():
rnn.eval()
pre_avg_loss = 0
pre_sum_loss = 0
y_true_task = {}
y_pred_task = {}
y_pred_task_score = {}
for index, tmp in enumerate(dataset_test):
tmp_compound, tmp_y, tmp_smi = tmp
loss = 0
outputs = rnn(tmp_compound)
# out_label = F.softmax(outputs, dim=1)
# pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
# pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
# y_pred.extend(pred)
# y_pred_score.extend(pred_score)
for i in range(len(tasks)):
validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
if len(validId) == 0:
continue
y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
y_label = tmp_y[:, i][torch.tensor(validId)].long().to(device)
# y_pred = torch.sigmoid(y_pred).view(-1)
# y_label = F.one_hot(y_label, 2).float().to(device)
loss += loss_function[i](y_pred, y_label)
y_pred_s = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
pred_lable = np.zeros_like(y_pred_s, dtype=int)
pred_lable[np.where(np.asarray(y_pred_s) > 0.5)] = 1
try:
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
y_pred_task_score[i].extend(y_pred_s)
except:
y_true_task[i] = []
y_pred_task[i] = []
y_pred_task_score[i] = []
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
y_pred_task_score[i].extend(y_pred_s)
pre_sum_loss += loss.item()
pre_avg_loss = pre_sum_loss / (index + 1)
trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task_score[i]) for i in range(len(tasks))]
trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task_score[i])[1],
precision_recall_curve(y_true_task[i], y_pred_task_score[i])[0]) for i in
range(len(tasks))]
acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
# recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
# specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(len(tasks))]
print("epoch:", epoch, " test " "avg_loss:", pre_avg_loss,
"acc: ", np.array(acc).mean(),
# "recall: ", np.array(recall).mean(),
# "specificity: ", np.array(specificity).mean(),
# " test_auc: ", trn_roc,
" test_auc: ", np.array(trn_roc).mean(),
# " test_pr: ", trn_prc,
" test_pr: ", np.array(trn_prc).mean())
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/clintox/get_clintox_data.py | .py | 5,581 | 181 | import pandas as pd
import numpy as np
from rdkit import Chem
import os
from rdkit.Chem import Descriptors
from sklearn.externals import joblib
# ---- step 1: read the raw ClinTox CSV, keep the labels, write a .smi file ----
filepath = "clintox/clintox.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
all_label = []
all_smi = []
# Context manager guarantees the .smi file is closed even on error.
# The unused RDKit round-trip (mol = Chem.MolFromSmiles(smi)) and the dead
# commented-out filtering block that referenced it have been removed.
with open("clintox/clintox.smi", mode='w', encoding="utf-8") as w_file:
    for line in df.values:
        smi = line[0].strip()
        all_label.append(line[1:3])  # [FDA_APPROVED, CT_TOX]
        all_smi.append(smi)
        w_file.write(smi + "\n")
# ---- step 2: run mol2vec to build the Morgan-substructure corpus ----
import subprocess
# An argument list with shell=False (the default) avoids shell quoting /
# injection issues that the old os.popen(<shell string>) call had.
result = subprocess.run(
    ["mol2vec", "corpus", "-i", "clintox/clintox.smi", "-o", "clintox/clintox.cp",
     "-r", "1", "-j", "4", "--uncommon", "UNK", "--threshold", "3"],
    capture_output=True, text=True)
print(result.stdout)
# ---- step 3: map each mol2vec "sentence" to a fixed-length token-id vector ----
vocab_path = "data/datasets/my_smi/smi_tran.vocab"
# vocab file format: "<token> <id>" per line
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("clintox/clintox_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
index = -1
mols_path = "clintox/clintox.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    # index counts every raw line, so labels stay aligned with molecules
    # that mol2vec could not parse ("None"/"UNK" lines are skipped below).
    index += 1
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    if not line:
        break
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    # two slots are reserved for <bos>/<eos>
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index (only when at least one token was written)
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    label.append(all_label[index])
    smi.append(all_smi[index])
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
joblib.dump(label, 'clintox/label.pkl')
joblib.dump(smi, 'clintox/smi.pkl')
# ---- step 4: encode the token-id file with the pre-trained context_vec ----
# model and dump per-molecule embeddings for the downstream classifier.
import os
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
# TF1-style session; grow GPU memory on demand instead of grabbing it all
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# configuration of the pre-trained encoder; presumably must match the
# settings of the training run that produced model_dir — confirm
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,
    'test_dataset': 'clintox/clintox_tran.cp_UNK',
    'vocab': 'my_smi/smi_tran.vocab',
    'model_dir': "smi_context_vec_300_0",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    # 'vocab_size': 121,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 512,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': False,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 300,
    'hidden_units_size': 150,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join(DATA_SET_DIR, parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# (optional evaluation / encoder-wrapping steps kept disabled)
# context_vec_model.load(sampled_softmax=False)
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# save the embeddings
joblib.dump(context_vec_embeddings, 'clintox/clintox_embed.pkl')
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/toxcast/toxcast_train.py | .py | 20,961 | 442 | import sys, os
sys.path.append('./')
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
from sklearn.metrics import precision_recall_curve
import numpy as np
import math
import random
from sklearn import metrics
# from utils.util import *
# from utils.model import MyDataset
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MyDataset(data.Dataset):
    """Minimal Dataset pairing embeddings, label rows and SMILES strings."""

    def __init__(self, compound, y, smi):
        super(MyDataset, self).__init__()
        self.compound = compound
        self.y = y
        self.smi = smi

    def __getitem__(self, idx):
        return self.compound[idx], self.y[idx], self.smi[idx]

    def __len__(self):
        return len(self.compound)
def split_multi_label(x, y, smi, k_fold, name):
    """Per-task stratified 5-fold split for a multi-label dataset.

    For every task, negative and positive sample indices are 5-folded
    separately (preserving the class ratio per fold); per-task folds are
    then unioned across tasks. Half of each held-out fold is validation,
    the other half test. Fold assignments and per-task class weights are
    persisted as .pkl files under *name*/.

    NOTE(review): because folds are unioned across tasks, a sample can land
    in the train set of one task's fold and the test set of another's, i.e.
    train/val/test may overlap — confirm this leakage is intended.

    Args:
        x: features, indexable by an integer index list (e.g. np.ndarray).
        y: (n_samples, n_tasks) labels; values other than 0/1 mean missing.
        smi: SMILES strings aligned with x and y.
        k_fold: which fold (0-4) to return.
        name: output directory for the persisted .pkl files.

    Returns:
        (train_x, train_y, train_smi, val_x, val_y, val_smi,
         test_x, test_y, test_smi, weights) where weights[i] is
        [w_negative, w_positive] for task i.
    """
    y = np.array(y).astype(float)
    all_smi = np.array(smi)
    # Positional KFold(5, False, 100) breaks on modern scikit-learn
    # (keyword-only arguments), and a random_state is meaningless with
    # shuffle=False, so it is dropped — the produced folds are identical.
    kf = KFold(n_splits=5, shuffle=False)
    all_train_index = [[], [], [], [], []]
    all_train_index_weights = [[] for i in range(y.shape[1])]
    all_val_index = [[], [], [], [], []]
    all_test_index = [[], [], [], [], []]
    for task_index in range(y.shape[-1]):
        negative_index = np.where(y[:, task_index] == 0)[0]
        positive_index = np.where(y[:, task_index] == 1)[0]
        train_index = [[], [], [], [], []]
        val_index = [[], [], [], [], []]
        test_index = [[], [], [], [], []]
        for k, tmp in enumerate(kf.split(negative_index)):
            # train_tmp/test_tmp index into negative_index, not into y.
            train_tmp, test_tmp = tmp
            train_index[k].extend(negative_index[train_tmp])
            num_t = int(len(test_tmp) / 2)
            # first half of the held-out fold -> validation, rest -> test
            val_index[k].extend(negative_index[test_tmp[:num_t]])
            test_index[k].extend(negative_index[test_tmp[num_t:]])
        for k, tmp in enumerate(kf.split(positive_index)):
            train_tmp, test_tmp = tmp
            train_index[k].extend(positive_index[train_tmp])
            num_t = int(len(test_tmp) / 2)
            val_index[k].extend(positive_index[test_tmp[:num_t]])
            test_index[k].extend(positive_index[test_tmp[num_t:]])
        # inverse-frequency class weights: [w_negative, w_positive]
        all_train_index_weights[task_index] = [(len(negative_index) + len(positive_index)) / len(negative_index),
                                               (len(negative_index) + len(positive_index)) / len(positive_index)]
        if task_index == 0:
            all_train_index = train_index
            all_val_index = val_index
            all_test_index = test_index
        else:
            # union with the folds accumulated from the previous tasks
            all_train_index = [list(set(all_train_index[i]).union(set(t))) for i, t in enumerate(train_index)]
            all_val_index = [list(set(all_val_index[i]).union(set(t))) for i, t in enumerate(val_index)]
            all_test_index = [list(set(all_test_index[i]).union(set(t))) for i, t in enumerate(test_index)]
    for i in range(5):
        joblib.dump({"train_index": all_train_index[i],
                     "val_index": all_val_index[i],
                     "test_index": all_test_index[i],
                     }, name + '/' + str(i + 1) + '-fold-index.pkl')
    joblib.dump(all_train_index_weights, name + '/weights.pkl')
    train_split_x = x[all_train_index[k_fold]]
    train_split_y = y[all_train_index[k_fold]]
    train_split_smi = all_smi[all_train_index[k_fold]]
    val_split_x = x[all_val_index[k_fold]]
    val_split_y = y[all_val_index[k_fold]]
    val_split_smi = all_smi[all_val_index[k_fold]]
    test_split_x = x[all_test_index[k_fold]]
    test_split_y = y[all_test_index[k_fold]]
    test_split_smi = all_smi[all_test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi, \
           val_split_x, val_split_y, val_split_smi, \
           test_split_x, test_split_y, test_split_smi, all_train_index_weights
class LSTM(nn.Module):
    """Attention-pooled two-layer LSTM classifier.

    The input carries three embedding channels along dim 1 (x[:, 0..2, :, :],
    feature size 600); they are combined with a learned per-channel weight,
    projected to 2048 dims, run through the LSTM and pooled with a
    self-attention step before the linear task head. For task_type="muti"
    the head emits 2 logits per task (out_num tasks).
    """
    def __init__(self, out_num, task_type='sing'):
        super(LSTM, self).__init__()
        # learnable mixing weights for the three embedding channels
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.fc = nn.Linear(600, 2048)
        # multi-task mode uses two logits per task
        self.out_num = out_num * 2 if "muti".__eq__(task_type) else out_num
        self.lstm = nn.LSTM(
            input_size=2048,
            hidden_size=2048,
            num_layers=2,
            batch_first=True,)
        self.fc4 = nn.Linear(2048, self.out_num)
        self.dropout = nn.Dropout(p=0.3)
    def attention_net(self, x, query, mask=None):
        """Scaled dot-product self-attention pooling over LSTM outputs.

        x, query: (batch, seq_len, hidden). Returns the attention-weighted
        sum over the sequence, (batch, hidden), plus the attention weights.
        The *mask* argument is currently unused.
        """
        d_k = query.size(-1) # dimensionality of the query vectors
        # scores: [batch, seq_len, seq_len]
        scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(d_k)
        # normalise scores over the last dimension
        alpha_n = F.softmax(scores, dim=-1)
        # weighted sum over the sequence:
        # [batch, seq_len, seq_len]·[batch, seq_len, hidden] -> sum dim 1 -> [batch, hidden]
        context = torch.matmul(alpha_n, x).sum(1)
        return context, alpha_n
    def forward(self, x):
        """x: (batch, 3, seq_len, 600) -> logits of shape (batch, self.out_num)."""
        # mix the three embedding channels with the learned weights
        x = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        x = self.fc(x.to(device)).to(device)
        out,(h_n, c_n) = self.lstm(x.to(device)) # h_n/c_n: final hidden/cell states
        # the dropout-perturbed outputs act as the attention query
        query = self.dropout(out)
        attn_output, alpha_n = self.attention_net(out, query)
        out = self.fc4(attn_output)
        return out
if __name__ == '__main__':
    # ---- hyper-parameters ----
    input_size = 512
    hidden_size = 512 # number of recurrent units in the RNN
    learning_rate = 0.01 # optimizer learning rate
    epoch_num = 2000
    batch_size = 128
    best_loss = 10000
    test_best_loss = 10000 # best validation loss so far; gates checkpointing
    weight_decay = 1e-5
    momentum = 0.9
    tasks_num = 617 # number of ToxCast assay tasks
    seed = 188
    # seed every RNG source for reproducibility
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    y = joblib.load("toxcast/toxcast.pkl")
    all_smi = joblib.load("toxcast/smi.pkl")
    # x = joblib.load("toxcast/toxcast_embed.pkl")
    # NOTE(review): x is a plain empty list while the embedding load above is
    # commented out; split_multi_label indexes x with a list of indices
    # (x[all_train_index[k_fold]]), which raises TypeError for a Python
    # list — presumably the joblib.load should be re-enabled. Confirm.
    x = []
    # 5-fold split; fold index 3 is used here
    train_split_x, train_split_y, train_split_smi, \
    val_split_x, val_split_y, val_split_smi, \
    test_split_x, test_split_y, test_split_smi, weights = split_multi_label(x, y, all_smi, 3, 'toxcast')
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM(tasks_num, "muti").to(device)
    # ---- optimizer and per-task weighted losses ----
    # optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate, weight_decay=weight_decay,
    #                             momentum=momentum)
    optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # optimizer = torch.optim.Adadelta(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay, rho=0.9)
    # optimizer = torch.optim.RMSprop(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay)
    # one weighted CrossEntropyLoss per task (inverse class frequencies)
    loss_function = [nn.CrossEntropyLoss(torch.Tensor(weight).to(device), reduction='mean') for weight in weights]
    # ---- training loop follows ----
for epoch in range(epoch_num):
avg_loss = 0
sum_loss = 0
rnn.train()
y_true_task = {}
y_pred_task = {}
y_pred_task_score = {}
for index, tmp in enumerate(dataset_train):
tmp_compound, tmp_y, tmp_smi = tmp
# tmp_y = tmp_y.float()
optimizer.zero_grad()
outputs = rnn(tmp_compound.to(device))
loss = 0
for i in range(tasks_num):
validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
if len(validId) == 0:
continue
y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId).to(device)]
y_label = tmp_y[:, i][torch.tensor(validId).to(device)]
# y_pred = torch.sigmoid(y_pred).view(-1)
# y_label = F.one_hot(y_label, 2).float().to(device)
loss += loss_function[i](y_pred.to(device), y_label.long().to(device))
pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
# pred_lable = np.zeros_like(y_pred.cpu().detach().numpy(), dtype=int)
# pred_lable[np.where(np.asarray(y_pred.cpu().detach().numpy()) > 0.5)] = 1
try:
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred)
except:
y_true_task[i] = []
y_pred_task[i] = []
# y_pred_task_score[i] = []
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred.cpu().detach().numpy())
# flood = (loss - b).abs() + b
loss.backward()
optimizer.step()
sum_loss += loss
# print("epoch:", epoch, "index: ", index,"loss:", loss.item())
avg_loss = sum_loss / (index + 1)
# cm = [metrics.confusion_matrix(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task[i])[1],
precision_recall_curve(y_true_task[i], y_pred_task[i])[0]) for i in range(tasks_num)]
# acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
# recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
# specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(tasks_num)]
print("epoch:", epoch, " train " "avg_loss:", avg_loss.item(),
# "acc: ", np.array(acc).mean(),
# "recall: ", np.array(recall).mean(),
# "specificity: ", np.array(specificity).mean(),
" train_auc: ", np.array(trn_roc).mean(),
" train_pr: ", np.array(trn_prc).mean())
with torch.no_grad():
rnn.eval()
val_sum_loss = []
y_true_task = {}
y_pred_task = {}
y_pred_task_score = {}
for index, tmp in enumerate(dataset_val):
tmp_compound, tmp_y, tmp_smi = tmp
loss = 0
outputs = rnn(tmp_compound)
# out_label = F.softmax(outputs, dim=1)
# pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
# pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
# y_pred.extend(pred)
# y_pred_score.extend(pred_score)
for i in range(tasks_num):
validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
if len(validId) == 0:
continue
y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
y_label = tmp_y[:, i][torch.tensor(validId)].long().to(device)
# y_pred = torch.sigmoid(y_pred).view(-1)
# y_label = F.one_hot(y_label, 2).float().to(device)
loss += loss_function[i](y_pred, y_label)
pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
# pred_lable = np.zeros_like(y_pred.cpu().detach().numpy(), dtype=int)
# pred_lable[np.where(np.asarray(y_pred.cpu().detach().numpy()) > 0.5)] = 1
try:
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred)
except:
y_true_task[i] = []
y_pred_task[i] = []
# y_pred_task_score[i] = []
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
# y_pred_task_score[i].extend(y_pred.cpu().detach().numpy())
val_sum_loss.append(loss.cpu().detach().numpy())
val_avg_loss = np.array(val_sum_loss).mean()
trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task[i])[1],
precision_recall_curve(y_true_task[i], y_pred_task[i])[0]) for i in
range(tasks_num)]
# acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
# recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
# specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(tasks_num)]
print("epoch:", epoch, " val " "avg_loss:", val_avg_loss,
# "acc: ", np.array(acc).mean(),
# "recall: ", np.array(recall).mean(),
# "specificity: ", np.array(specificity).mean(),
# " val_auc: ", trn_roc,
" val_auc: ", np.array(trn_roc).mean(),
# " val_pr: ", trn_prc,
" val_pr: ", np.array(trn_prc).mean())
# 保存模型
if val_avg_loss < test_best_loss:
test_best_loss = val_avg_loss
PATH = 'toxcast/lstm_net.pth'
print("test save model")
torch.save(rnn.state_dict(), PATH)
with torch.no_grad():
rnn.eval()
test_sum_loss = []
y_true_task = {}
y_pred_task = {}
y_pred_task_score = {}
for index, tmp in enumerate(dataset_test):
tmp_compound, tmp_y, tmp_smi = tmp
loss = 0
outputs = rnn(tmp_compound)
# out_label = F.softmax(outputs, dim=1)
# pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
# pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
# y_pred.extend(pred)
# y_pred_score.extend(pred_score)
for i in range(tasks_num):
validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
if len(validId) == 0:
continue
y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
y_label = tmp_y[:, i][torch.tensor(validId)].long().to(device)
# y_pred = torch.sigmoid(y_pred).view(-1)
# y_label = F.one_hot(y_label, 2).float().to(device)
loss += loss_function[i](y_pred, y_label)
y_pred_s = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
pred_lable = np.zeros_like(y_pred_s, dtype=int)
pred_lable[np.where(np.asarray(y_pred_s) > 0.5)] = 1
try:
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
y_pred_task_score[i].extend(y_pred_s)
except:
y_true_task[i] = []
y_pred_task[i] = []
y_pred_task_score[i] = []
y_true_task[i].extend(y_label.cpu().numpy())
y_pred_task[i].extend(pred_lable)
y_pred_task_score[i].extend(y_pred_s)
test_sum_loss.append(loss.cpu().detach().numpy())
trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task_score[i]) for i in range(tasks_num)]
trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task_score[i])[1],
precision_recall_curve(y_true_task[i], y_pred_task_score[i])[0]) for i in
range(tasks_num)]
# print(len(trn_roc))
# print(sum(y_true_task[0]))
# print(sum(y_pred_task[0]))
acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
# recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
# specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(tasks_num)]
print("epoch:", epoch, " test " "avg_loss:", np.array(test_sum_loss).mean(),
"acc: ", np.array(acc).mean(),
# "recall: ", np.array(recall).mean(),
# "specificity: ", np.array(specificity).mean(),
# " test_auc: ", trn_roc,
" test_auc: ", np.array(trn_roc).mean(),
# " test_pr: ", trn_prc,
" test_pr: ", np.array(trn_prc).mean())
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/toxcast/get_toxcast_data.py | .py | 4,829 | 161 | import sys
sys.path.append('./')
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import os
# ---- step 1: dump the SMILES column to a .smi file, keep the task labels ----
filepath="toxcast/toxcast_data.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
all_label = []
all_smi = []
w_file = open("toxcast/toxcast.smi", mode='w',encoding="utf-8")
for line in df.values:
    smi = line[0].strip()
    # NOTE(review): an empty SMILES aborts the whole loop rather than
    # skipping the row — presumably all rows after the first blank are
    # meant to be discarded; confirm against the CSV layout.
    if len(smi) <= 0:
        break
    all_label.append(line[1:])  # all remaining columns are assay labels
    all_smi.append(smi)
    w_file.write(smi+"\n")
w_file.close()
# ---- step 2: run mol2vec to build the Morgan-substructure corpus ----
import subprocess
# An argument list with shell=False (the default) avoids shell quoting /
# injection issues that the old os.popen(<shell string>) call had.
result = subprocess.run(
    ["mol2vec", "corpus", "-i", "toxcast/toxcast.smi", "-o", "toxcast/toxcast.cp",
     "-r", "1", "-j", "4", "--uncommon", "UNK", "--threshold", "3"],
    capture_output=True, text=True)
print(result.stdout)
# ---- step 3: map each mol2vec "sentence" to a fixed-length token-id vector ----
vocab_path = "data/datasets/my_smi/smi_tran.vocab"
# vocab file format: "<token> <id>" per line
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("toxcast/toxcast_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
index = -1
mols_path = "toxcast/toxcast.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    # index counts every raw line so labels stay aligned with molecules
    # that mol2vec could not parse ("None"/"UNK" lines are skipped below).
    index += 1
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    if not line:
        break
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    # two slots are reserved for <bos>/<eos>
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index (only when at least one token was written)
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    label.append(all_label[index])
    smi.append(all_smi[index])
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
joblib.dump(label, "toxcast/toxcast.pkl")
joblib.dump(smi, "toxcast/smi.pkl")
# ---- step 4: encode the token-id file with the pre-trained context_vec ----
# model and dump per-molecule embeddings for the downstream classifier.
import os
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
# TF1-style session; grow GPU memory on demand instead of grabbing it all
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# configuration of the pre-trained encoder; presumably must match the
# settings of the training run that produced model_dir — confirm
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,
    'test_dataset': "toxcast/toxcast_tran.cp_UNK",
    'vocab': 'my_smi/smi_tran.vocab',
    'model_dir': "smi_context_vec_best",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 512,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': False,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 300,
    'hidden_units_size': 150,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join(DATA_SET_DIR, parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# (optional evaluation / encoder-wrapping steps kept disabled)
# context_vec_model.load(sampled_softmax=False)
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# save the embeddings
joblib.dump(context_vec_embeddings, "toxcast/toxcast_embed.pkl")
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/esol/esol_train.py | .py | 13,443 | 360 | from rdkit import Chem
import torch
import torch.nn as nn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import seaborn as sns
import math
import random
from torch.autograd import Variable
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_len(smi):
    """Return the heavy-atom count of a molecule given as a SMILES string.

    The SMILES is round-tripped through RDKit canonicalisation
    (MolFromSmiles -> MolToSmiles -> MolFromSmiles) so the count matches the
    canonical form used elsewhere in this pipeline.
    """
    mol = Chem.MolFromSmiles(smi)
    smiles = Chem.MolToSmiles(mol)
    mol = Chem.MolFromSmiles(smiles)
    # GetNumAtoms() replaces building a throwaway list of atom indices
    # just to take its length.
    return mol.GetNumAtoms()
def pack_sequences(X, order=None):
    """Pad a list of variable-length 2-D tensors into one dense batch.

    Rows of the returned batch are arranged longest-first (unless an explicit
    ``order`` of source indices is given); ``order`` is returned so the
    original arrangement can be restored later.
    """
    seq_lens = np.array([seq.shape[0] for seq in X])
    n_feats = X[0].shape[1]
    batch = len(X)
    if order is None:
        # indices of the sequences, longest first
        order = np.argsort(seq_lens)[::-1]
    padded = X[0].new(batch, int(seq_lens.max()), n_feats).zero_()
    for row, src in enumerate(order):
        seq = X[src]
        padded[row, :seq.shape[0], :] = seq
    return padded, order
def unpack_sequences(X, order):
    """Pad a PackedSequence and undo the pack_sequences row permutation.

    ``order[i]`` is the original position of padded row ``i``; rows are
    scattered back so row j of the result corresponds to input sequence j.
    """
    padded, _lengths = pad_packed_sequence(X, batch_first=True)
    restored = torch.zeros(size=padded.size()).to(device)
    for row, original_pos in enumerate(order):
        restored[original_pos] = padded[row]
    return restored
def split_data(x, y, all_smi, lens, k_fold):
    """Split the dataset into train/val/test slices for fold ``k_fold``.

    A shuffled 4-fold KFold (fixed seed 100) is generated; each held-out fold
    is cut 50/50 into validation and test. Every fold's index sets are
    persisted under ``esol/`` for reproducibility.

    Returns train/val/test slices of (x, y, smiles, lengths), 12 arrays total.
    """
    y = np.array(y, dtype=np.float64)
    all_smi = np.array(all_smi)
    lens = np.array(lens)
    # Keyword arguments are required here: the historical positional form
    # KFold(4, True, 100) raises TypeError on scikit-learn >= 0.24.
    kf = KFold(n_splits=4, shuffle=True, random_state=100)
    train_index = [[], [], [], [], []]
    val_index = [[], [], [], [], []]
    test_index = [[], [], [], [], []]
    for k, tmp in enumerate(kf.split(x)):
        # test_tmp is the held-out fold; split it in half into val and test.
        train_tmp, test_tmp = tmp
        train_index[k].extend(train_tmp)
        num_t = int(len(test_tmp) / 2)
        val_index[k].extend(test_tmp[0:num_t])
        test_index[k].extend(test_tmp[num_t:])
    # NOTE(review): only 4 folds are produced, so the 5th dumped index file is
    # always empty -- kept to preserve the existing on-disk file names.
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, 'esol/' + str(i + 1) + '-fold-index.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    train_split_lens = lens[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    val_split_lens = lens[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    test_split_lens = lens[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi, train_split_lens,\
           val_split_x, val_split_y, val_split_smi, val_split_lens,\
           test_split_x, test_split_y, test_split_smi, test_split_lens
class LSTM(nn.Module):
    """LSTM regression head over stacked Mol2Context-vec token embeddings.

    Input x is expected as (batch, 3, seq_len, 600): three embedding layers
    that are mixed by a learned 3-weight parameter -- presumably the three
    context_vec output states; confirm against the caller.
    Returns (prediction, attention matrix, per-token attention score).
    """
    def __init__(self):
        super(LSTM, self).__init__()
        # Learnable mixing weights for the three embedding layers (init ~uniform).
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        # Project 600-d token embeddings up to the LSTM input size.
        self.fc = nn.Linear(600, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,)
        # self.fc1 = nn.Linear(512, 1024)
        # self.fc2 = nn.Linear(128, 1024)
        self.fc3 = nn.Linear(1024, 512)
        # Scalar regression output.
        self.fc4 = nn.Linear(512, 1)
        self.dropout = nn.Dropout(p=0.5)
    def attention_net(self, x, query, mask=None):
        """Scaled dot-product self-attention pooling over the sequence.

        Returns (context, alpha_n, att): pooled context vector, full attention
        matrix, and a per-token sigmoid relevance score.
        """
        # d_k: dimensionality of the query, used for score scaling
        d_k = query.size(-1)
        # query:[batch, seq_len, hidden_dim*2], x.t:[batch, hidden_dim*2, seq_len]
        # print("query: ", query.shape, x.transpose(1, 2).shape) # torch.Size([128, 38, 128]) torch.Size([128, 128, 38])
        # scores: [batch, seq_len, seq_len]
        scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(d_k)
        # print("score: ", scores.shape) # torch.Size([128, 38, 38])
        # normalise the scores over the last dimension
        alpha_n = F.softmax(scores, dim=-1)
        # print("alpha_n: ", alpha_n.shape) # torch.Size([128, 38, 38])
        # weighted sum over tokens:
        # [batch, seq_len, seq_len]·[batch,seq_len, hidden_dim*2] = [batch,seq_len,hidden_dim*2] -> [batch, hidden_dim*2]
        context = torch.matmul(alpha_n, x).sum(1)
        att = torch.matmul(x, context.unsqueeze(2))/ math.sqrt(d_k)
        att = torch.sigmoid(att.squeeze())
        return context, alpha_n, att
    def forward(self, x, x_lens):
        # x_lens is currently unused -- sequence packing is commented out below.
        # print(self.matrix1, self.matrix2, self.matrix3)
        # bs = len(x)
        # length = np.array([t.shape[0] for t in x])
        #
        # x, orderD = pack_sequences(x)
        # x = self.matrix1 * x[:,0,:,:] + self.matrix2 * x[:,1,:,:] + self.matrix3 * x[:,2,:,:]
        x = x.to(device)
        # Learned combination of the three stacked embedding layers.
        x = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        x = self.fc(x.to(device)).to(device)
        # packing
        # embed_packed = pack_padded_sequence(x, x_lens,
        #                                    batch_first=True,
        #                                    enforce_sorted=False)
        out, (hidden, cell) = self.lstm(x)  # (hidden, cell) is the final hidden state
        query = self.dropout(out)
        # Self-attention over the LSTM outputs.
        out_att, alpha_n, att = self.attention_net(out, query)
        # alpha_n =0
        # att =0
        # out,hidden = self.lstm(x.to(device))  # previous hidden state
        # out = torch.cat((h_n[-1, :, :], h_n[-2, :, :]), dim=-1)
        # out1 = unpack_sequences(rnn_out, orderD)
        # for i in range(bs):
        #     out1[i,length[i]:-1,:] = 0
        # NOTE(review): mean-pooled LSTM output is used for the prediction;
        # out_att from the attention is computed but not consumed here.
        out = torch.mean(out, dim=1).squeeze()
        # out = out[:,-1,:]
        # fully-connected regression head
        out_tmp = self.fc3(out)
        out_tmp = F.leaky_relu(out_tmp)
        out_tmp = self.dropout(out_tmp)
        out = self.fc4(out_tmp)
        # outputs = []
        # for i, out_tmp in enumerate(out):
        #     # out_tmp = torch.mean(out_tmp[:lens[i],:], dim=0).squeeze()
        #     out_tmp = out_tmp[lens[i]-1,:]
        #     out_tmp = self.fc3(out_tmp)
        #     out_tmp = F.leaky_relu(out_tmp)
        #     out_tmp = self.dropout(out_tmp)
        #     out_tmp = self.fc4(out_tmp)
        #     outputs.append(out_tmp)
        # out = torch.stack(outputs, dim=0)
        return out, alpha_n, att
class MyDataset(data.Dataset):
    """Dataset over parallel sequences: embeddings, labels, SMILES, token lengths."""

    def __init__(self, compound, y, smi, len):
        super().__init__()
        self.compound = compound  # per-molecule embedding arrays/tensors
        # self.compound = torch.FloatTensor(compound)
        # self.y = torch.FloatTensor(y)
        self.y = y        # regression targets
        self.smi = smi    # SMILES strings
        self.len = len    # token lengths (parameter name kept for caller compatibility)

    def __len__(self):
        return len(self.compound)

    def __getitem__(self, idx):
        return (self.compound[idx], self.y[idx], self.smi[idx], self.len[idx])
if __name__ == '__main__':
    # best hyper-parameters found so far: fold 0, lr 0.01, batch 16, seed 188, random_state 100
    # ---- hyper-parameters ----
    input_size = 512
    num_layers = 2      # number of stacked LSTM layers
    hidden_size = 512   # LSTM hidden units
    learning_rate = 0.01
    epoch_num = 1000
    batch_size = 16
    best_loss = 10000       # best train loss (only used by the commented-out train checkpoint)
    test_best_loss = 1000   # best validation loss seen so far
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.051  # flooding level for the commented-out flooding loss
    seed = 188
    # Seed every RNG source for reproducibility.
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # filepath = "esol/delaney.csv"
    # df = pd.read_csv(filepath, header=0, encoding="gbk")
    # Pre-computed inputs produced by get_data.py: labels, SMILES,
    # context_vec embeddings and token lengths.
    y = joblib.load('esol/label.pkl')
    all_smi = np.array(joblib.load('esol/smi.pkl'))
    x = joblib.load('esol/esol_embed.pkl')
    lens = joblib.load('esol/lens.pkl')
    # 5-Fold split; train on fold 0.
    train_split_x, train_split_y, train_split_smi, train_split_lens,\
    val_split_x, val_split_y, val_split_smi, val_split_lens,\
    test_split_x, test_split_y, test_split_smi, test_split_lens = split_data(x, y, all_smi, lens, 0)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi, train_split_lens)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi, val_split_lens)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi, test_split_lens)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM().to(device)
    # SGD with momentum and L2 regularisation over all model parameters.
    # optimizer = torch.optim.Adam(list(rnn.parameters()), lr=learning_rate)
    optimizer = torch.optim.SGD(list(rnn.parameters()),
                                lr=learning_rate, weight_decay=weight_decay,
                                momentum=momentum)
    loss_function = nn.MSELoss().to(device)
    for epoch in range(epoch_num):
        # ---- train ----
        sum_loss = 0.0
        rnn.train()
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
            optimizer.zero_grad()
            outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device))
            loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
            # loss = (loss - b).abs() + b  # flooding regulariser (disabled)
            loss.backward()
            optimizer.step()
            # Bug fix: accumulate the detached scalar. Summing the loss
            # *tensor* kept every batch's autograd graph alive for the
            # whole epoch, steadily growing memory.
            sum_loss += loss.item()
        avg_loss = sum_loss / (index + 1)
        print("epoch:", epoch, " train " "avg_loss:", avg_loss)
        # # 保存模型 (checkpoint on train loss -- disabled)
        # if avg_loss < best_loss:
        #     best_loss = avg_loss
        #     PATH = 'esol/lstm_net.pth'
        #     print("train save model")
        #     torch.save(rnn.state_dict(), PATH)
        # ---- validation ----
        with torch.no_grad():
            rnn.eval()
            test_sum_loss = 0.0
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device))
                loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
                test_sum_loss += loss.item()
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " val ", "avg_loss: ", test_avg_loss)
            # Checkpoint on best validation loss.
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                print("test save model")
                torch.save(rnn.state_dict(), 'esol/lstm_net.pth')
        att_flag = False
        # if test_avg_loss < 0.5:
        #     att_flag = True
        # ---- test ----
        # Bug fix: this loop previously ran outside no_grad, so `outputs`
        # required grad and Tensor.numpy() below raised a RuntimeError
        # ("Can't call numpy() on Tensor that requires grad"). no_grad also
        # avoids building useless autograd graphs during evaluation.
        with torch.no_grad():
            rnn.eval()
            test_sum_loss = 0.0
            all_pred = []
            all_label = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device))
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                all_label.extend(y_label.cpu().numpy())
                all_pred.extend(y_pred.cpu().numpy())
                loss = loss_function(y_pred, y_label)
                test_sum_loss += loss.item()
            mae = mean_absolute_error(all_label, all_pred)
            mse = mean_squared_error(all_label, all_pred)
            rmse = np.sqrt(mse)
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " test avg_loss:", test_avg_loss,
                  " mae : ", mae,
                  " rmse : ", rmse)
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/esol/get_data.py | .py | 5,780 | 185 | import pandas as pd
from sklearn.externals import joblib
import numpy as np
from rdkit import Chem
from rdkit.Chem import Descriptors
import os
# step 1: canonicalise every SMILES in the raw ESOL csv, collect its
# log-solubility label, and write the SMILES corpus for mol2vec.
filepath = "esol/delaney-processed3.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
all_label = []
all_smi = []
# `with` guarantees the corpus file is flushed and closed even on error.
with open("esol/esol.smi", mode='w', encoding="utf-8") as w_file:
    for line in df.values:
        # column 9: raw SMILES; column 8: measured log-solubility label
        smi = Chem.MolToSmiles(Chem.MolFromSmiles(line[9].strip()), isomericSmiles=True)
        # (removed a leftover debug branch that matched one hard-coded SMILES
        # and assigned an unused local)
        if len(smi) <= 0:
            # NOTE(review): `break` aborts the whole loop on the first empty
            # SMILES -- confirm `continue` was not intended.
            break
        all_smi.append(smi)
        all_label.append(line[8])
        w_file.write(smi + "\n")
# step 2: run the external mol2vec corpus builder over the SMILES file
# (rare tokens below the threshold are replaced by UNK) and echo its output.
mol2vec_cmd = "mol2vec corpus -i esol/esol.smi -o esol/esol.cp -r 1 -j 4 --uncommon UNK --threshold 3"
print(os.popen(mol2vec_cmd).read())
# step 3: tokenise the mol2vec corpus into fixed-length id sequences using the
# pre-trained vocabulary, keeping labels/SMILES/lengths aligned with the rows
# that survive filtering.
vocab_path = "data/datasets/my_smi/smi_tran.vocab"
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("esol/esol_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
lens = []
index = -1
mols_path = "esol/esol.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    words = line.split()
    # `index` advances on every line -- including skipped ones -- so it stays
    # aligned with all_label/all_smi from step 1.
    index += 1
    # Molecules mol2vec could not embed appear as "None"/"UNK"; skip them.
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    # NOTE(review): readline() returns "" only at EOF, but a *blank* line in
    # the middle of the corpus would also terminate this loop early -- confirm
    # the corpus never contains blank lines.
    if not line:
        break
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        # print(token)
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index
    # (reuses `j` from the loop above; safe because non-blank lines always
    # yield at least one token)
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    # print(token_ids)
    label.append(all_label[index])
    smi.append(all_smi[index])
    lens.append(len(words) if len(words) + 2 <= sentence_maxlen else 80)
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
# Persist the aligned labels / SMILES / lengths for the training script.
joblib.dump(label, 'esol/label.pkl')
joblib.dump(smi, 'esol/smi.pkl')
joblib.dump(lens, 'esol/lens.pkl')
# step 4: embed the tokenised corpus with the pre-trained context_vec model
# and save the per-token embeddings for the downstream regression task.
import os
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
# Grow GPU memory on demand instead of pre-allocating all of it (TF1 API).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    # enable cuDNN kernels only when a GPU is visible
    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,
    'test_dataset': 'esol/esol_tran.cp_UNK',
    'vocab': 'my_smi/smi_tran.vocab',
    'model_dir': "smi_context_vec_512",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    # 'vocab_size': 121,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 512,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': False,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 512,
    'hidden_units_size': 300,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
# shuffle=False keeps batch order aligned with label.pkl/smi.pkl from step 3.
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join(DATA_SET_DIR, parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# context_vec_model.load(sampled_softmax=False)
#
# # Evaluate Bidirectional Language Model
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
#
# # Build context_vec meta-model to deploy for production and persist in disk
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# save the embeddings (x)
joblib.dump(context_vec_embeddings, 'esol/esol_embed.pkl')
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/esol/esol_train2.py | .py | 33,347 | 778 | from rdkit import Chem
import torch
import os
import torch.nn as nn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
# from paper_data.plot_morgan import main
import numpy as np
import seaborn as sns
import math
import pickle
import random
from rdkit.Chem import MolFromSmiles
# from AttentiveFP.Featurizer import *
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.optim as optim
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
# from AttentiveFP import Fingerprint, Fingerprint_viz, save_smiles_dicts, get_smiles_dicts, get_smiles_array, moltosvg_highlight
# Run on GPU when available, CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
p_dropout = 0.2
fingerprint_dim = 200
# also known as l2_regularization_lambda
# NOTE(review): in the AttentiveFP reference code values like these are used
# as exponents (10 ** -weight_decay, 10 ** -learning_rate) -- confirm before
# treating them as literal rates.
weight_decay = 5
learning_rate = 2.5
# for regression model
output_units_num = 1
radius = 2  # atom-level graph-attention propagation rounds
T = 2       # molecule-level attention timesteps
smilesList = ['CC']
degrees = [0, 1, 2, 3, 4, 5]  # atom degrees used for degree bucketing
class MolGraph(object):
    """Typed multigraph of a molecule: nodes grouped by type ('atom', 'bond', 'molecule')."""

    def __init__(self):
        # type name (or (type, degree) bucket) -> list of Node objects
        self.nodes = {}

    def new_node(self, ntype, features=None, rdkit_ix=None):
        """Create a Node of ``ntype``, register it under that type, and return it."""
        node = Node(ntype, features, rdkit_ix)
        self.nodes.setdefault(ntype, []).append(node)
        return node

    def add_subgraph(self, subgraph):
        """Merge another MolGraph's node lists into this one, type by type."""
        mine = self.nodes
        theirs = subgraph.nodes
        for ntype in set(mine.keys()) | set(theirs.keys()):
            mine.setdefault(ntype, []).extend(theirs.get(ntype, []))

    def sort_nodes_by_degree(self, ntype):
        """Reorder self.nodes[ntype] by degree; also index each bucket under (ntype, degree)."""
        buckets = {d: [] for d in degrees}
        for node in self.nodes[ntype]:
            buckets[len(node.get_neighbors(ntype))].append(node)
        ordered = []
        for d in degrees:
            bucket = buckets[d]
            self.nodes[(ntype, d)] = bucket
            ordered.extend(bucket)
        self.nodes[ntype] = ordered

    def feature_array(self, ntype):
        """Stack the feature vectors of all ``ntype`` nodes into a 2-D numpy array."""
        assert ntype in self.nodes
        return np.array([node.features for node in self.nodes[ntype]])

    def rdkit_ix_array(self):
        """Original RDKit atom indices, in the current 'atom' node order."""
        return np.array([node.rdkit_ix for node in self.nodes['atom']])

    def neighbor_list(self, self_ntype, neighbor_ntype):
        """For each self_ntype node, the indices of its neighbor_ntype neighbours."""
        assert self_ntype in self.nodes and neighbor_ntype in self.nodes
        position = {node: i for i, node in enumerate(self.nodes[neighbor_ntype])}
        return [[position[nb] for nb in node.get_neighbors(neighbor_ntype)]
                for node in self.nodes[self_ntype]]
class Node(object):
    """A single graph node; adjacency is stored bidirectionally on both endpoints."""
    __slots__ = ['ntype', 'features', '_neighbors', 'rdkit_ix']

    def __init__(self, ntype, features, rdkit_ix):
        self.ntype = ntype          # node type tag ('atom', 'bond', 'molecule')
        self.features = features    # feature vector (may be None)
        self._neighbors = []        # adjacent nodes of any type
        self.rdkit_ix = rdkit_ix    # original RDKit atom index (atoms only)

    def add_neighbors(self, neighbor_list):
        """Link this node with every node in ``neighbor_list`` (both directions)."""
        for other in neighbor_list:
            self._neighbors.append(other)
            other._neighbors.append(self)

    def get_neighbors(self, ntype):
        """Return the neighbours whose type equals ``ntype``."""
        return [nb for nb in self._neighbors if nb.ntype == ntype]
class memoize(object):
    """Decorator caching a function's results keyed by its positional args.

    Arguments must be hashable (they form the cache key). When applied to a
    method, ``__get__`` binds the instance so ``self`` becomes part of the key.
    """
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        if args in self.cache:
            return self.cache[args]
        result = self.func(*args)
        self.cache[args] = result
        return result

    def __get__(self, obj, objtype):
        # Bug fix: `partial` was referenced without ever being imported, so
        # using this decorator on a method raised NameError. Imported locally
        # to keep the module's top-level imports untouched.
        from functools import partial
        return partial(self.__call__, obj)
def graph_from_smiles(smiles):
    """Build a MolGraph (atom/bond/molecule nodes) from a SMILES string.

    Raises ValueError if RDKit cannot parse the SMILES.
    NOTE(review): atom_features/bond_features come from AttentiveFP.Featurizer,
    whose import is commented out at the top of this file -- calling this
    function as-is raises NameError; confirm the intended import.
    """
    graph = MolGraph()
    mol = MolFromSmiles(smiles)
    if not mol:
        raise ValueError("Could not parse SMILES string:", smiles)
    # map RDKit atom index -> graph node so bonds can find their endpoints
    atoms_by_rd_idx = {}
    for atom in mol.GetAtoms():
        new_atom_node = graph.new_node('atom', features=atom_features(atom), rdkit_ix=atom.GetIdx())
        atoms_by_rd_idx[atom.GetIdx()] = new_atom_node
    for bond in mol.GetBonds():
        atom1_node = atoms_by_rd_idx[bond.GetBeginAtom().GetIdx()]
        atom2_node = atoms_by_rd_idx[bond.GetEndAtom().GetIdx()]
        new_bond_node = graph.new_node('bond', features=bond_features(bond))
        # the bond node links to both endpoint atoms; the atoms also link to each other
        new_bond_node.add_neighbors((atom1_node, atom2_node))
        atom1_node.add_neighbors((atom2_node,))
    # a single 'molecule' node adjacent to every atom
    mol_node = graph.new_node('molecule')
    mol_node.add_neighbors(graph.nodes['atom'])
    return graph
def array_rep_from_smiles(molgraph):
    """Precompute everything we need from MolGraph so that we can free the memory asap."""
    arrayrep = {
        'atom_features': molgraph.feature_array('atom'),
        'bond_features': molgraph.feature_array('bond'),
        'atom_list': molgraph.neighbor_list('molecule', 'atom'),
        'rdkit_ix': molgraph.rdkit_ix_array(),
    }
    # per-degree neighbour index tables (atoms were bucketed by degree earlier)
    for deg in [0, 1, 2, 3, 4, 5]:
        arrayrep[('atom_neighbors', deg)] = np.array(
            molgraph.neighbor_list(('atom', deg), 'atom'), dtype=int)
        arrayrep[('bond_neighbors', deg)] = np.array(
            molgraph.neighbor_list(('atom', deg), 'bond'), dtype=int)
    return arrayrep
def gen_descriptor_data(smilesList):
    """Canonicalise each SMILES and compute its array representation.

    Returns a dict mapping canonical SMILES -> arrayrep. Molecules that fail
    featurisation are reported and skipped (deliberate best-effort).
    """
    smiles_to_fingerprint_array = {}
    for i, smiles in enumerate(smilesList):
        smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True)
        try:
            molgraph = graph_from_smiles(smiles)
            molgraph.sort_nodes_by_degree('atom')
            arrayrep = array_rep_from_smiles(molgraph)
            smiles_to_fingerprint_array[smiles] = arrayrep
        except Exception:
            # Was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the skip-and-report behaviour is kept.
            print(smiles, "%%%%%%%%")
    return smiles_to_fingerprint_array
def save_smiles_dicts(smilesList, filename):
    """Featurise every SMILES and build zero-padded, fixed-size lookup dicts.

    All molecules are padded to the size of the largest one plus one extra
    zero row; neighbour index tables are pre-filled with that pad row's index
    so out-of-range neighbours point at zeros. The resulting dicts are
    pickled to ``filename + '.pickle'`` and returned (the mapping consumed by
    get_smiles_array()).
    """
    # first need to get the max atom length
    max_atom_len = 0
    max_bond_len = 0
    num_atom_features = 0
    num_bond_features = 0
    smiles_to_rdkit_list = {}
    smiles_to_fingerprint_features = gen_descriptor_data(smilesList)
    # First pass: find the largest atom/bond counts and the feature widths.
    for smiles, arrayrep in smiles_to_fingerprint_features.items():
        atom_features = arrayrep['atom_features']
        bond_features = arrayrep['bond_features']
        rdkit_list = arrayrep['rdkit_ix']
        smiles_to_rdkit_list[smiles] = rdkit_list
        atom_len, num_atom_features = atom_features.shape
        bond_len, num_bond_features = bond_features.shape
        if atom_len > max_atom_len:
            max_atom_len = atom_len
        if bond_len > max_bond_len:
            max_bond_len = bond_len
    # then add 1 so I can zero pad everything
    max_atom_index_num = max_atom_len
    max_bond_index_num = max_bond_len
    max_atom_len += 1
    max_bond_len += 1
    smiles_to_atom_info = {}
    smiles_to_bond_info = {}
    smiles_to_atom_neighbors = {}
    smiles_to_bond_neighbors = {}
    smiles_to_atom_mask = {}
    degrees = [0, 1, 2, 3, 4, 5]
    # then run through our numpy array again
    # Second pass: write each molecule's features into padded arrays.
    for smiles, arrayrep in smiles_to_fingerprint_features.items():
        # 1.0 for real atoms, 0.0 for pad rows
        mask = np.zeros((max_atom_len))
        # get the basic info of what
        # my atoms and bonds are initialized
        atoms = np.zeros((max_atom_len, num_atom_features))
        bonds = np.zeros((max_bond_len, num_bond_features))
        # then get the arrays initlialized for the neighbors
        atom_neighbors = np.zeros((max_atom_len, len(degrees)))
        bond_neighbors = np.zeros((max_atom_len, len(degrees)))
        # now set these all to the last element of the list, which is zero padded
        atom_neighbors.fill(max_atom_index_num)
        bond_neighbors.fill(max_bond_index_num)
        atom_features = arrayrep['atom_features']
        bond_features = arrayrep['bond_features']
        for i, feature in enumerate(atom_features):
            mask[i] = 1.0
            atoms[i] = feature
        for j, feature in enumerate(bond_features):
            bonds[j] = feature
        atom_neighbor_count = 0
        bond_neighbor_count = 0
        working_atom_list = []
        working_bond_list = []
        # Atoms were sorted by degree, so writing bucket by bucket keeps rows
        # aligned with the 'atom' node order.
        for degree in degrees:
            atom_neighbors_list = arrayrep[('atom_neighbors', degree)]
            bond_neighbors_list = arrayrep[('bond_neighbors', degree)]
            if len(atom_neighbors_list) > 0:
                for i, degree_array in enumerate(atom_neighbors_list):
                    for j, value in enumerate(degree_array):
                        atom_neighbors[atom_neighbor_count, j] = value
                    atom_neighbor_count += 1
            if len(bond_neighbors_list) > 0:
                for i, degree_array in enumerate(bond_neighbors_list):
                    for j, value in enumerate(degree_array):
                        bond_neighbors[bond_neighbor_count, j] = value
                    bond_neighbor_count += 1
        # then add everything to my arrays
        smiles_to_atom_info[smiles] = atoms
        smiles_to_bond_info[smiles] = bonds
        smiles_to_atom_neighbors[smiles] = atom_neighbors
        smiles_to_bond_neighbors[smiles] = bond_neighbors
        smiles_to_atom_mask[smiles] = mask
    # free the per-molecule intermediates before pickling
    del smiles_to_fingerprint_features
    feature_dicts = {}
    # feature_dicts['smiles_to_atom_mask'] = smiles_to_atom_mask
    # feature_dicts['smiles_to_atom_info']= smiles_to_atom_info
    feature_dicts = {
        'smiles_to_atom_mask': smiles_to_atom_mask,
        'smiles_to_atom_info': smiles_to_atom_info,
        'smiles_to_bond_info': smiles_to_bond_info,
        'smiles_to_atom_neighbors': smiles_to_atom_neighbors,
        'smiles_to_bond_neighbors': smiles_to_bond_neighbors,
        'smiles_to_rdkit_list': smiles_to_rdkit_list
    }
    pickle.dump(feature_dicts, open(filename + '.pickle', "wb"))
    print('feature dicts file saved as ' + filename + '.pickle')
    return feature_dicts
def split_data(x, y, all_smi, lens, k_fold):
    """Split the dataset into train/val/test slices for fold ``k_fold``.

    A shuffled 4-fold KFold (fixed seed 8) is generated; each held-out fold
    is cut 50/50 into validation and test. Every fold's index sets are
    persisted under ``esol/`` for reproducibility.

    Returns train/val/test slices of (x, y, smiles, lengths), 12 arrays total.
    """
    y = np.array(y, dtype=np.float64)
    all_smi = np.array(all_smi)
    lens = np.array(lens)
    # Keyword arguments are required here: the historical positional form
    # KFold(4, True, 8) raises TypeError on scikit-learn >= 0.24.
    kf = KFold(n_splits=4, shuffle=True, random_state=8)
    train_index = [[], [], [], [], []]
    val_index = [[], [], [], [], []]
    test_index = [[], [], [], [], []]
    for k, tmp in enumerate(kf.split(x)):
        # test_tmp is the held-out fold; split it in half into val and test.
        train_tmp, test_tmp = tmp
        train_index[k].extend(train_tmp)
        num_t = int(len(test_tmp) / 2)
        val_index[k].extend(test_tmp[0:num_t])
        test_index[k].extend(test_tmp[num_t:])
    # NOTE(review): only 4 folds are produced, so the 5th dumped index file is
    # always empty -- kept to preserve the existing on-disk file names.
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, 'esol/' + str(i + 1) + '-fold-index.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    train_split_lens = lens[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    val_split_lens = lens[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    test_split_lens = lens[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi, train_split_lens,\
           val_split_x, val_split_y, val_split_smi, val_split_lens,\
           test_split_x, test_split_y, test_split_smi, test_split_lens
def get_smiles_array(smilesList, feature_dicts):
    """Assemble batched feature arrays for a list of SMILES from precomputed dicts.

    Returns (atom_features, bond_features, atom_neighbor_idx, bond_neighbor_idx,
    atom_mask, smiles_to_rdkit_list); the first five are numpy arrays batched
    in the input SMILES order.
    """
    masks = []
    atoms = []
    bonds = []
    atom_nbrs = []
    bond_nbrs = []
    for smiles in smilesList:
        masks.append(feature_dicts['smiles_to_atom_mask'][smiles])
        atoms.append(feature_dicts['smiles_to_atom_info'][smiles])
        bonds.append(feature_dicts['smiles_to_bond_info'][smiles])
        atom_nbrs.append(feature_dicts['smiles_to_atom_neighbors'][smiles])
        bond_nbrs.append(feature_dicts['smiles_to_bond_neighbors'][smiles])
    return (np.asarray(atoms), np.asarray(bonds), np.asarray(atom_nbrs),
            np.asarray(bond_nbrs), np.asarray(masks),
            feature_dicts['smiles_to_rdkit_list'])
class Fingerprint(nn.Module):
    """Attentive-FP style graph attention network producing a molecule fingerprint.

    ``radius`` rounds of atom-level neighbour attention (GRU-updated) are
    followed by ``T`` molecule-level attention timesteps.
    NOTE(review): forward() casts masks with torch.cuda.FloatTensor, so this
    module only runs on CUDA inputs as written.
    """
    def __init__(self, radius, T, input_feature_dim, input_bond_dim, \
                 fingerprint_dim, output_units_num, p_dropout):
        super(Fingerprint, self).__init__()
        # graph attention for atom embedding
        self.atom_fc = nn.Linear(input_feature_dim, fingerprint_dim)
        self.neighbor_fc = nn.Linear(input_feature_dim + input_bond_dim, fingerprint_dim)
        # one GRU / align / attend layer per propagation round
        self.GRUCell = nn.ModuleList([nn.GRUCell(fingerprint_dim, fingerprint_dim) for r in range(radius)])
        self.align = nn.ModuleList([nn.Linear(2 * fingerprint_dim, 1) for r in range(radius)])
        self.attend = nn.ModuleList([nn.Linear(fingerprint_dim, fingerprint_dim) for r in range(radius)])
        # graph attention for molecule embedding
        self.mol_GRUCell = nn.GRUCell(fingerprint_dim, fingerprint_dim)
        self.mol_align = nn.Linear(2 * fingerprint_dim, 1)
        self.mol_attend = nn.Linear(fingerprint_dim, fingerprint_dim)
        # you may alternatively assign a different set of parameter in each attentive layer for molecule embedding like in atom embedding process.
        # self.mol_GRUCell = nn.ModuleList([nn.GRUCell(fingerprint_dim, fingerprint_dim) for t in range(T)])
        # self.mol_align = nn.ModuleList([nn.Linear(2*fingerprint_dim,1) for t in range(T)])
        # self.mol_attend = nn.ModuleList([nn.Linear(fingerprint_dim, fingerprint_dim) for t in range(T)])
        self.dropout = nn.Dropout(p=p_dropout)
        self.output = nn.Linear(fingerprint_dim, output_units_num)
        self.radius = radius
        self.T = T
    def forward(self, atom_list, bond_list, atom_degree_list, bond_degree_list, atom_mask):
        """Return (atom_feature, mol_prediction, mol_feature) for a padded batch.

        atom_mask marks real atoms (1) vs pad rows (0); pad neighbour indices
        point at the last (zero) row, see save_smiles_dicts().
        """
        atom_mask = atom_mask.unsqueeze(2)
        batch_size, mol_length, num_atom_feat = atom_list.size()
        atom_feature = F.leaky_relu(self.atom_fc(atom_list))
        # gather each atom's incident bond / neighbour-atom features
        bond_neighbor = [bond_list[i][bond_degree_list[i]] for i in range(batch_size)]
        bond_neighbor = torch.stack(bond_neighbor, dim=0)
        atom_neighbor = [atom_list[i][atom_degree_list[i]] for i in range(batch_size)]
        atom_neighbor = torch.stack(atom_neighbor, dim=0)
        # then concatenate them
        neighbor_feature = torch.cat([atom_neighbor, bond_neighbor], dim=-1)
        neighbor_feature = F.leaky_relu(self.neighbor_fc(neighbor_feature))
        # generate mask to eliminate the influence of blank atoms
        # (index mol_length-1 is the zero-pad row every missing neighbour points at)
        attend_mask = atom_degree_list.clone()
        attend_mask[attend_mask != mol_length - 1] = 1
        attend_mask[attend_mask == mol_length - 1] = 0
        attend_mask = attend_mask.type(torch.cuda.FloatTensor).unsqueeze(-1)
        softmax_mask = atom_degree_list.clone()
        softmax_mask[softmax_mask != mol_length - 1] = 0
        softmax_mask[softmax_mask == mol_length - 1] = -9e8 # make the softmax value extremly small
        softmax_mask = softmax_mask.type(torch.cuda.FloatTensor).unsqueeze(-1)
        batch_size, mol_length, max_neighbor_num, fingerprint_dim = neighbor_feature.shape
        # round 0 of neighbour attention
        atom_feature_expand = atom_feature.unsqueeze(-2).expand(batch_size, mol_length, max_neighbor_num,
                                                                fingerprint_dim)
        feature_align = torch.cat([atom_feature_expand, neighbor_feature], dim=-1)
        align_score = F.leaky_relu(self.align[0](self.dropout(feature_align)))
        # print(attention_weight)
        align_score = align_score + softmax_mask
        attention_weight = F.softmax(align_score, -2)
        # print(attention_weight)
        attention_weight = attention_weight * attend_mask
        # print(attention_weight)
        neighbor_feature_transform = self.attend[0](self.dropout(neighbor_feature))
        # print(features_neighbor_transform.shape)
        context = torch.sum(torch.mul(attention_weight, neighbor_feature_transform), -2)
        # print(context.shape)
        context = F.elu(context)
        context_reshape = context.view(batch_size * mol_length, fingerprint_dim)
        atom_feature_reshape = atom_feature.view(batch_size * mol_length, fingerprint_dim)
        # GRU update of atom states from the attended neighbourhood context
        atom_feature_reshape = self.GRUCell[0](context_reshape, atom_feature_reshape)
        atom_feature = atom_feature_reshape.view(batch_size, mol_length, fingerprint_dim)
        # do nonlinearity
        activated_features = F.relu(atom_feature)
        # remaining propagation rounds reuse the gathered neighbour indices
        for d in range(self.radius - 1):
            # bonds_indexed = [bond_list[i][torch.cuda.LongTensor(bond_degree_list)[i]] for i in range(batch_size)]
            neighbor_feature = [activated_features[i][atom_degree_list[i]] for i in range(batch_size)]
            # neighbor_feature is a list of 3D tensor, so we need to stack them into a 4D tensor first
            neighbor_feature = torch.stack(neighbor_feature, dim=0)
            atom_feature_expand = activated_features.unsqueeze(-2).expand(batch_size, mol_length, max_neighbor_num,
                                                                          fingerprint_dim)
            feature_align = torch.cat([atom_feature_expand, neighbor_feature], dim=-1)
            align_score = F.leaky_relu(self.align[d + 1](self.dropout(feature_align)))
            # print(attention_weight)
            align_score = align_score + softmax_mask
            attention_weight = F.softmax(align_score, -2)
            # print(attention_weight)
            attention_weight = attention_weight * attend_mask
            # print(attention_weight)
            neighbor_feature_transform = self.attend[d + 1](self.dropout(neighbor_feature))
            # print(features_neighbor_transform.shape)
            context = torch.sum(torch.mul(attention_weight, neighbor_feature_transform), -2)
            # print(context.shape)
            context = F.elu(context)
            context_reshape = context.view(batch_size * mol_length, fingerprint_dim)
            # atom_feature_reshape = atom_feature.view(batch_size*mol_length, fingerprint_dim)
            atom_feature_reshape = self.GRUCell[d + 1](context_reshape, atom_feature_reshape)
            atom_feature = atom_feature_reshape.view(batch_size, mol_length, fingerprint_dim)
            # do nonlinearity
            activated_features = F.relu(atom_feature)
        # initial molecule state: masked sum over atom states
        mol_feature = torch.sum(activated_features * atom_mask, dim=-2)
        # do nonlinearity
        activated_features_mol = F.relu(mol_feature)
        mol_softmax_mask = atom_mask.clone()
        mol_softmax_mask[mol_softmax_mask == 0] = -9e8
        mol_softmax_mask[mol_softmax_mask == 1] = 0
        mol_softmax_mask = mol_softmax_mask.type(torch.cuda.FloatTensor)
        # T molecule-level attention timesteps refining mol_feature
        for t in range(self.T):
            mol_prediction_expand = activated_features_mol.unsqueeze(-2).expand(batch_size, mol_length, fingerprint_dim)
            mol_align = torch.cat([mol_prediction_expand, activated_features], dim=-1)
            mol_align_score = F.leaky_relu(self.mol_align(mol_align))
            mol_align_score = mol_align_score + mol_softmax_mask
            mol_attention_weight = F.softmax(mol_align_score, -2)
            mol_attention_weight = mol_attention_weight * atom_mask
            # print(mol_attention_weight.shape,mol_attention_weight)
            activated_features_transform = self.mol_attend(self.dropout(activated_features))
            # aggregate embeddings of atoms in a molecule
            mol_context = torch.sum(torch.mul(mol_attention_weight, activated_features_transform), -2)
            # print(mol_context.shape,mol_context)
            mol_context = F.elu(mol_context)
            mol_feature = self.mol_GRUCell(mol_context, mol_feature)
            # print(mol_feature.shape,mol_feature)
            # do nonlinearity
            activated_features_mol = F.relu(mol_feature)
        mol_prediction = self.output(self.dropout(mol_feature))
        return atom_feature, mol_prediction, mol_feature
class LSTM(nn.Module):
    """Regression head fusing context-vec sequence features with AttentiveFP
    molecular fingerprints.

    The three embedding channels are blended with learnable weights,
    projected to 1024 dims, run through a 2-layer LSTM and mean-pooled;
    the pooled vector is concatenated with the AttentiveFP molecule
    feature before the final linear layer.
    """

    def __init__(self, model):
        super(LSTM, self).__init__()
        # learnable mixing weights over the three embedding channels
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        # pretrained AttentiveFP fingerprint network
        self.model = model
        self.fc = nn.Linear(600, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,
        )
        self.fc3 = nn.Linear(1024, 512)
        # 512 LSTM features + 200-dim molecule fingerprint
        self.fc4 = nn.Linear(512 + 200, 1)
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x, x_lens, tmp_smi):
        """Return (prediction, alpha_n, att); the last two are placeholder
        zeros kept only for interface compatibility with attention variants."""
        embeddings = x.to(device)
        # weighted combination of the three embedding channels
        mixed = (self.matrix[0] * embeddings[:, 0, :, :]
                 + self.matrix[1] * embeddings[:, 1, :, :]
                 + self.matrix[2] * embeddings[:, 2, :, :])
        projected = self.fc(mixed.to(device)).to(device)
        seq_out, (hidden, cell) = self.lstm(projected)
        # graph-side features for the same molecules via AttentiveFP
        x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
                                                                                                     feature_dicts)
        atoms_prediction, mol_prediction, mol_feature = self.model(
            torch.Tensor(x_atom).to(device),
            torch.Tensor(x_bonds).to(device),
            torch.cuda.LongTensor(x_atom_index),
            torch.cuda.LongTensor(x_bond_index),
            torch.Tensor(x_mask).to(device))
        alpha_n = 0
        att = 0
        # mean-pool over the time dimension
        pooled = torch.mean(seq_out, dim=1).squeeze()
        out_tmp = self.fc3(pooled)
        out_tmp = F.leaky_relu(out_tmp)
        out_tmp = self.dropout(out_tmp)
        # fuse sequence features with the molecule fingerprint
        out_tmp = torch.cat((out_tmp.view(-1, 512), mol_feature.view(-1, 200)), dim=1)
        out_tmp = self.fc4(out_tmp)
        return out_tmp, alpha_n, att
class MyDataset(data.Dataset):
    """Dataset yielding (embedding, target, SMILES, sequence-length) tuples."""

    def __init__(self, compound, y, smi, len):
        # NOTE: the parameter name `len` shadows the builtin inside
        # __init__ only; kept for backward compatibility with callers.
        super(MyDataset, self).__init__()
        self.compound = compound
        self.y = y
        self.smi = smi
        self.len = len

    def __getitem__(self, item):
        sample = (self.compound[item], self.y[item], self.smi[item], self.len[item])
        return sample

    def __len__(self):
        return self.compound.__len__()
if __name__ == '__main__':
    # ---- hyper-parameters ----
    input_size = 512
    num_layers = 2  # number of stacked LSTM layers
    hidden_size = 512  # hidden units per LSTM layer
    learning_rate = 0.01  # SGD learning rate
    epoch_num = 1000
    batch_size = 16
    best_loss = 100000
    test_best_loss = 100000
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.04  # flood level for the (currently disabled) flooding loss below
    seed = 188
    # seed every RNG so runs are reproducible
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # filepath = "esol/delaney.csv"
    # df = pd.read_csv(filepath, header=0, encoding="gbk")
    # pre-computed ESOL labels, SMILES, context-vec embeddings and lengths
    y = joblib.load('esol/label.pkl')
    all_smi = np.array(joblib.load('esol/smi.pkl'))
    x = joblib.load('esol/esol_embed.pkl')
    lens = joblib.load('esol/lens.pkl')
    # 5-Fold
    train_split_x, train_split_y, train_split_smi, train_split_lens,\
    val_split_x, val_split_y, val_split_smi, val_split_lens,\
    test_split_x, test_split_y, test_split_smi, test_split_lens = split_data(x, y, all_smi, lens, 0)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi, train_split_lens)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi, val_split_lens)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi, test_split_lens)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    data_all = MyDataset(x, y, all_smi, lens)
    dataset_all = data.DataLoader(dataset=data_all, batch_size=1, shuffle=True)
    raw_filename = "esol/delaney-processed.csv"
    feature_filename = raw_filename.replace('.csv','.pickle')
    filename = raw_filename.replace('.csv','')
    prefix_filename = raw_filename.split('/')[-1].replace('.csv','')
    smiles_tasks_df = pd.read_csv(raw_filename)
    smilesList = smiles_tasks_df.smiles.values
    print("number of all smiles: ", len(smilesList))
    atom_num_dist = []
    remained_smiles = []
    canonical_smiles_list = []
    # keep only SMILES RDKit can parse, recording the canonical form
    for smiles in smilesList:
        try:
            mol = Chem.MolFromSmiles(smiles)
            atom_num_dist.append(len(mol.GetAtoms()))
            remained_smiles.append(smiles)
            canonical_smiles_list.append(Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True))
        except:
            print(smiles,"######3")
            pass
    feature_filename = 'esol/delaney-processed'
    # if os.path.isfile(feature_filename):
    #     print("NO esol/delaney-processed.pickle")
    #     feature_dicts = pickle.load(open(feature_filename, "rb"))
    # else:
    feature_dicts = save_smiles_dicts(smilesList, feature_filename)
    # probe one molecule to discover the atom/bond feature dimensions
    x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(
        [canonical_smiles_list[0]], feature_dicts)
    num_atom_features = x_atom.shape[-1]
    num_bond_features = x_bonds.shape[-1]
    model = Fingerprint(radius, T, num_atom_features, num_bond_features,
                        fingerprint_dim, output_units_num, p_dropout)
    model.to(device)
    rnn = LSTM(model).to(device)
    # optimize all of rnn's parameters (incl. the fingerprint model) at learning_rate
    # optimizer = torch.optim.Adam(list(rnn.parameters()), lr=learning_rate)
    optimizer = torch.optim.SGD(list(rnn.parameters()),
                            lr=learning_rate, weight_decay = weight_decay,
                            momentum = momentum)
    loss_function = nn.MSELoss().to(device)
    # ---- training loop ----
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
            optimizer.zero_grad()
            # x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
            #                                                                                              feature_dicts)
            # atoms_prediction, outputs, mol_feature = rnn(torch.Tensor(x_atom).to(device),
            #                                              torch.Tensor(x_bonds).to(device),
            #                                              torch.cuda.LongTensor(x_atom_index),
            #                                              torch.cuda.LongTensor(x_bond_index),
            #                                              torch.Tensor(x_mask).to(device))
            outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device),tmp_smi)
            # print(matrix1,matrix2,matrix3)
            # print(outputs.flatten())
            loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
            # loss = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
        avg_loss = sum_loss / (index + 1)
        print("epoch:", epoch," train " "avg_loss:", avg_loss.item())
        # # save the model
        # if avg_loss < best_loss:
        #     best_loss = avg_loss
        #     PATH = 'esol/lstm_net.pth'
        #     print("train save model")
        #     torch.save(rnn.state_dict(), PATH)
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            # validation pass
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
                # print(outputs.flatten())
                loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
                test_sum_loss += loss.item()
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch," val ", "avg_loss: ", test_avg_loss)
            # checkpoint on best validation loss
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                print("test save model")
                torch.save(rnn.state_dict(), 'esol/lstm_net.pth')
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            all_pred = []
            all_label = []
            # test pass with regression metrics (MSE / MAE / RMSE)
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                loss = 0
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                all_label.extend(y_label.cpu().numpy())
                all_pred.extend(y_pred.cpu().numpy())
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                test_sum_loss += loss.item()
            mse = mean_squared_error(all_label, all_pred)
            mae = mean_absolute_error(all_label, all_pred)
            rmse = np.sqrt(mse)
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " test avg_loss:", test_avg_loss
                  ," mae : ", mae
                  ," rmse : ", rmse)
            print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
            rnn.eval()
            # per-sample predictions over the full dataset (batch size 1)
            for index, tmp in enumerate(dataset_all):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
                print(outputs.cpu().detach().numpy()[0][0], tmp_y.cpu().detach().numpy()[0], tmp_smi[0])
# === file: tasks/tox21/tox_train.py (lvqiujie/Mol2Context-vec) ===
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
from sklearn.metrics import precision_recall_curve
import numpy as np
import math
import random
from sklearn import metrics
# from utils.util import *
# from utils.model import *
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LSTM(nn.Module):
    """LSTM classifier over context-vec embeddings.

    The three embedding channels are mixed with learnable weights,
    projected to 1024 dims, run through a 2-layer LSTM and pooled
    (mean pooling, or scaled dot-product attention when att=True)
    before two fully-connected layers produce the task logits.
    """

    def __init__(self, out_num, input_size=300, task_type='sing', att=False):
        super(LSTM, self).__init__()
        # learnable mixing weights for the three embedding channels
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.input_size = input_size
        # multi-task mode emits two logits (neg/pos) per task
        if "muti".__eq__(task_type):
            self.out_num = out_num * 2
        else:
            self.out_num = out_num
        self.att = att
        self.fc = nn.Linear(self.input_size, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,
        )
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, self.out_num)
        self.dropout = nn.Dropout(p=0.3)

    def attention_net(self, x, query, mask=None):
        """Scaled dot-product attention pooled over the sequence axis.

        Returns the pooled context (batch, hidden) together with the
        attention matrix (batch, seq_len, seq_len).  `mask` is accepted
        for interface compatibility but unused.
        """
        key_dim = query.size(-1)
        # (batch, seq, d) @ (batch, d, seq) -> (batch, seq, seq)
        raw_scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(key_dim)
        # normalise scores over the last axis
        attn = F.softmax(raw_scores, dim=-1)
        # weight the values, then collapse the sequence dimension
        pooled = torch.matmul(attn, x).sum(1)
        return pooled, attn

    def forward(self, x):
        x = x.to(device)
        # learnable blend of the three embedding channels
        x = (self.matrix[0] * x[:, 0, :, :]
             + self.matrix[1] * x[:, 1, :, :]
             + self.matrix[2] * x[:, 2, :, :])
        x = self.fc(x.to(device)).to(device)
        out, (h_n, c_n) = self.lstm(x.to(device))
        if self.att:
            # attention pooling; the dropout-perturbed output is the query
            query = self.dropout(out)
            out, alpha_n = self.attention_net(out, query)
        else:
            # simple mean pooling over time steps
            out = torch.mean(out, dim=1).squeeze().cuda()
        out = self.fc3(out)
        out = F.relu(out)
        out = self.dropout(out)
        out = self.fc4(out)
        return out
class MyDataset(data.Dataset):
    """Dataset over parallel arrays of embeddings, labels and SMILES."""

    def __init__(self, compound, y, smi):
        super(MyDataset, self).__init__()
        self.compound = compound
        self.y = y
        self.smi = smi

    def __getitem__(self, item):
        return (self.compound[item], self.y[item], self.smi[item])

    def __len__(self):
        return self.compound.__len__()
def split_multi_label(x, y, smi, k_fold, name):
    """5-fold train/val/test split for a multi-label (Tox21-style) dataset.

    For each task column, negatives and positives are 5-folded separately so
    every fold roughly keeps the class ratio; each held-out fold is halved
    into validation and test.  Per-fold index sets are unioned across tasks,
    dumped to ``<name>/<k>-fold-index.pkl``, and inverse-frequency class
    weights per task go to ``<name>/weights.pkl``.

    Returns the x/y/smi slices for fold ``k_fold`` plus the class weights.
    """
    y = np.array(y).astype(float)
    all_smi = np.array(smi)
    # save_path = 'tox/'+str(k_fold)+'-fold-index.pkl'
    # if os.path.isfile(save_path):
    #     index = joblib.load(save_path)
    #     train_split_x = x[index["train_index"]]
    #     train_split_y = y[index["train_index"]]
    #     val_split_x = x[index["val_index"]]
    #     val_split_y = y[index["val_index"]]
    #     test_split_x = x[index["test_index"]]
    #     test_split_y = y[index["test_index"]]
    #     train_weights = joblib.load('tox/train_weights.pkl')
    #     return train_split_x, train_split_y, val_split_x, val_split_y, test_split_x, test_split_y, train_weights
    # NOTE(review): positional KFold(5, False, 100) only works on old
    # scikit-learn; current versions require keyword arguments here.
    kf = KFold(5, False, 100)
    all_train_index = [[],[],[],[],[]]
    all_train_index_weights = [[] for i in range(y.shape[1])]
    all_val_index = [[],[],[],[],[]]
    all_test_index = [[],[],[],[],[]]
    for task_index in range(y.shape[-1]):
        negative_index = np.where(y[:, task_index] == 0)[0]
        positive_index = np.where(y[:, task_index] == 1)[0]
        train_index = [[],[],[],[],[]]
        val_index = [[],[],[],[],[]]
        test_index = [[],[],[],[],[]]
        for k, tmp in enumerate(kf.split(negative_index)):
            # train_tmp is the index ofnegative_index
            train_tmp, test_tmp = tmp
            train_index[k].extend(negative_index[train_tmp])
            num_t = int(len(test_tmp)/2)
            # first half of the held-out fold -> validation, second half -> test
            val_index[k].extend(negative_index[test_tmp[:num_t]])
            test_index[k].extend(negative_index[test_tmp[num_t:]])
        for k, tmp in enumerate(kf.split(positive_index)):
            train_tmp, test_tmp = tmp
            train_index[k].extend(positive_index[train_tmp])
            num_t = int(len(test_tmp)/2)
            val_index[k].extend(positive_index[test_tmp[:num_t]])
            test_index[k].extend(positive_index[test_tmp[num_t:]])
        # class weights inversely proportional to class frequency for this task
        all_train_index_weights[task_index] = [(len(negative_index) + len(positive_index)) / len(negative_index),
                                               (len(negative_index) + len(positive_index)) / len(positive_index)]
        if task_index == 0:
            all_train_index = train_index
            all_val_index = val_index
            all_test_index = test_index
        else:
            # union this task's fold indices with those accumulated so far
            all_train_index = [list(set(all_train_index[i]).union(set(t))) for i, t in enumerate(train_index)]
            all_val_index = [list(set(all_val_index[i]).union(set(t))) for i, t in enumerate(val_index)]
            all_test_index = [list(set(all_test_index[i]).union(set(t))) for i, t in enumerate(test_index)]
    for i in range(5):
        joblib.dump({"train_index":all_train_index[i],
                    "val_index": all_val_index[i],
                    "test_index": all_test_index[i],
                    }, name+'/'+str(i+1)+'-fold-index.pkl')
    joblib.dump(all_train_index_weights, name+'/weights.pkl')
    train_split_x = x[all_train_index[k_fold]]
    train_split_y = y[all_train_index[k_fold]]
    train_split_smi = all_smi[all_train_index[k_fold]]
    val_split_x = x[all_val_index[k_fold]]
    val_split_y = y[all_val_index[k_fold]]
    val_split_smi = all_smi[all_val_index[k_fold]]
    test_split_x = x[all_test_index[k_fold]]
    test_split_y = y[all_test_index[k_fold]]
    test_split_smi = all_smi[all_test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi,\
           val_split_x, val_split_y, val_split_smi,\
           test_split_x, test_split_y, test_split_smi, all_train_index_weights
if __name__ == '__main__':
    # ---- hyper-parameters ----
    input_size = 512
    hidden_size = 512  # hidden units of the LSTM
    learning_rate = 0.01  # SGD learning rate
    epoch_num = 2000
    batch_size = 128
    best_loss = 10000
    test_best_loss = 10000
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.2  # flood level for the (disabled) flooding-loss variant below
    # Tox21 task name -> column index in the label matrix
    dict_label = {"NR-AR": 0,
                  "NR-AR-LBD": 1,
                  "NR-AhR": 2,
                  "NR-Aromatase": 3,
                  "NR-ER": 4,
                  "NR-ER-LBD": 5,
                  "NR-PPAR-gamma": 6,
                  "SR-ARE": 7,
                  "SR-ATAD5": 8,
                  "SR-HSE": 9,
                  "SR-MMP": 10,
                  "SR-p53": 11, }
    tasks = list(dict_label.keys())
    tasks_num = len(tasks)
    seed = 188
    # seed every RNG for reproducibility
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # pre-computed labels, SMILES and context-vec embeddings
    y = joblib.load("tox/label.pkl")
    y = np.array(y).astype(float)
    print(y.shape)
    all_smi = joblib.load("tox/smi.pkl")
    x = joblib.load("tox/tox_embed.pkl")
    # 5-Fold
    train_split_x, train_split_y, train_split_smi, \
    val_split_x, val_split_y, val_split_smi, \
    test_split_x, test_split_y, test_split_smi, weights = split_multi_label(x, y, all_smi, 3, 'tox')
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    rnn = LSTM(tasks_num, task_type="muti", input_size=300).to(device)
    # set up the optimizer and loss functions
    # optimize rnn.parameters with the given learning rate
    optimizer = torch.optim.SGD(rnn.parameters(), lr=learning_rate, weight_decay=weight_decay,
                                momentum=momentum)
    # optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # optimizer = torch.optim.Adadelta(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay, rho=0.9)
    # optimizer = torch.optim.RMSprop(rnn.parameters(), lr=learning_rate, weight_decay = weight_decay)
    # loss_function = F.cross_entropy
    # loss_function = F.nll_loss
    # one class-weighted cross-entropy per task to counter label imbalance
    loss_function = [nn.CrossEntropyLoss(torch.Tensor(weight).to(device), reduction='mean') for weight in weights]
    # loss_function = nn.BCELoss()
    # loss_function = nn.BCEWithLogitsLoss()
    # ---- training loop ----
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        y_true_task = {}
        y_pred_task = {}
        y_pred_task_score = {}
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi = tmp
            # tmp_y = tmp_y.float()
            optimizer.zero_grad()
            outputs = rnn(tmp_compound.to(device))
            loss = 0
            for i in range(len(tasks)):
                # only samples with a 0/1 label for this task contribute
                validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
                if len(validId) == 0:
                    continue
                y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId).to(device)]
                y_label = tmp_y[:, i][torch.tensor(validId).to(device)]
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function[i](y_pred.to(device), y_label.long().to(device))
                pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
                # pred_lable = np.zeros_like(y_pred.cpu().detach().numpy(), dtype=int)
                # pred_lable[np.where(np.asarray(y_pred.cpu().detach().numpy()) > 0.5)] = 1
                # first batch for task i initialises the accumulators (KeyError path)
                try:
                    y_true_task[i].extend(y_label.cpu().numpy())
                    y_pred_task[i].extend(pred_lable)
                    # y_pred_task_score[i].extend(y_pred)
                except:
                    y_true_task[i] = []
                    y_pred_task[i] = []
                    # y_pred_task_score[i] = []
                    y_true_task[i].extend(y_label.cpu().numpy())
                    y_pred_task[i].extend(pred_lable)
                    # y_pred_task_score[i].extend(y_pred.cpu().detach().numpy())
            # flood = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
        avg_loss = sum_loss / (index + 1)
        # cm = [metrics.confusion_matrix(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task[i])[1],
                               precision_recall_curve(y_true_task[i], y_pred_task[i])[0]) for i in range(len(tasks))]
        # acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        # recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(len(tasks))]
        # specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(len(tasks))]
        print("epoch:", epoch, " train " "avg_loss:", avg_loss.item(),
              # "acc: ", np.array(acc).mean(),
              # "recall: ", np.array(recall).mean(),
              # "specificity: ", np.array(specificity).mean(),
              " train_auc: ", np.array(trn_roc).mean(),
              " train_pr: ", np.array(trn_prc).mean())
        with torch.no_grad():
            rnn.eval()
            val_sum_loss = []
            y_true_task = {}
            y_pred_task = {}
            y_pred_task_score = {}
            # ---- validation pass ----
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi = tmp
                loss = 0
                outputs = rnn(tmp_compound)
                # out_label = F.softmax(outputs, dim=1)
                # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
                # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
                # y_pred.extend(pred)
                # y_pred_score.extend(pred_score)
                for i in range(tasks_num):
                    validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
                    if len(validId) == 0:
                        continue
                    y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
                    y_label = tmp_y[:, i][torch.tensor(validId)].long().to(device)
                    # y_pred = torch.sigmoid(y_pred).view(-1)
                    # y_label = F.one_hot(y_label, 2).float().to(device)
                    loss += loss_function[i](y_pred, y_label)
                    pred_lable = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
                    # pred_lable = np.zeros_like(y_pred.cpu().detach().numpy(), dtype=int)
                    # pred_lable[np.where(np.asarray(y_pred.cpu().detach().numpy()) > 0.5)] = 1
                    try:
                        y_true_task[i].extend(y_label.cpu().numpy())
                        y_pred_task[i].extend(pred_lable)
                        # y_pred_task_score[i].extend(y_pred)
                    except:
                        y_true_task[i] = []
                        y_pred_task[i] = []
                        # y_pred_task_score[i] = []
                        y_true_task[i].extend(y_label.cpu().numpy())
                        y_pred_task[i].extend(pred_lable)
                        # y_pred_task_score[i].extend(y_pred.cpu().detach().numpy())
                val_sum_loss.append(loss.cpu().detach().numpy())
            val_avg_loss = np.array(val_sum_loss).mean()
            trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
            trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task[i])[1],
                                   precision_recall_curve(y_true_task[i], y_pred_task[i])[0]) for i in
                       range(tasks_num)]
            # acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
            # recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
            # specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(tasks_num)]
            print("epoch:", epoch, " val " "avg_loss:", val_avg_loss,
                  # "acc: ", np.array(acc).mean(),
                  # "recall: ", np.array(recall).mean(),
                  # "specificity: ", np.array(specificity).mean(),
                  # " val_auc: ", trn_roc,
                  " val_auc: ", np.array(trn_roc).mean(),
                  # " val_pr: ", trn_prc,
                  " val_pr: ", np.array(trn_prc).mean())
            # checkpoint on best validation loss
            if val_avg_loss < test_best_loss:
                test_best_loss = val_avg_loss
                PATH = 'tox/lstm_net.pth'
                print("test save model")
                torch.save(rnn.state_dict(), PATH)
        with torch.no_grad():
            rnn.eval()
            test_sum_loss = []
            y_true_task = {}
            y_pred_task = {}
            y_pred_task_score = {}
            # ---- test pass: hard labels at 0.5 plus raw scores for AUC ----
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi = tmp
                loss = 0
                outputs = rnn(tmp_compound)
                # out_label = F.softmax(outputs, dim=1)
                # pred = out_label.data.max(1, keepdim=True)[1].view(-1).cpu().numpy()
                # pred_score = [x[tmp_y.cpu().detach().numpy()[i]] for i, x in enumerate(out_label.cpu().detach().numpy())]
                # y_pred.extend(pred)
                # y_pred_score.extend(pred_score)
                for i in range(tasks_num):
                    validId = np.where((tmp_y[:, i].cpu().numpy() == 0) | (tmp_y[:, i].cpu().numpy() == 1))[0]
                    if len(validId) == 0:
                        continue
                    y_pred = outputs[:, i * 2:(i + 1) * 2][torch.tensor(validId)].to(device)
                    y_label = tmp_y[:, i][torch.tensor(validId)].long().to(device)
                    # y_pred = torch.sigmoid(y_pred).view(-1)
                    # y_label = F.one_hot(y_label, 2).float().to(device)
                    loss += loss_function[i](y_pred, y_label)
                    y_pred_s = F.softmax(y_pred.detach().cpu(), dim=-1)[:, 1].view(-1).numpy()
                    pred_lable = np.zeros_like(y_pred_s, dtype=int)
                    pred_lable[np.where(np.asarray(y_pred_s) > 0.5)] = 1
                    try:
                        y_true_task[i].extend(y_label.cpu().numpy())
                        y_pred_task[i].extend(pred_lable)
                        y_pred_task_score[i].extend(y_pred_s)
                    except:
                        y_true_task[i] = []
                        y_pred_task[i] = []
                        y_pred_task_score[i] = []
                        y_true_task[i].extend(y_label.cpu().numpy())
                        y_pred_task[i].extend(pred_lable)
                        y_pred_task_score[i].extend(y_pred_s)
                test_sum_loss.append(loss.cpu().detach().numpy())
            trn_roc = [metrics.roc_auc_score(y_true_task[i], y_pred_task_score[i]) for i in range(tasks_num)]
            trn_prc = [metrics.auc(precision_recall_curve(y_true_task[i], y_pred_task_score[i])[1],
                                   precision_recall_curve(y_true_task[i], y_pred_task_score[i])[0]) for i in
                       range(tasks_num)]
            # print(len(trn_roc))
            # print(sum(y_true_task[0]))
            # print(sum(y_pred_task[0]))
            acc = [metrics.accuracy_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
            # recall = [metrics.recall_score(y_true_task[i], y_pred_task[i]) for i in range(tasks_num)]
            # specificity = [cm[i][0, 0] / (cm[i][0, 0] + cm[i][0, 1]) for i in range(tasks_num)]
            print("epoch:", epoch, " test " "avg_loss:", np.array(test_sum_loss).mean(),
                  "acc: ", np.array(acc).mean(),
                  # "recall: ", np.array(recall).mean(),
                  # "specificity: ", np.array(specificity).mean(),
                  # " test_auc: ", trn_roc,
                  " test_auc: ", np.array(trn_roc).mean(),
                  # " test_pr: ", trn_prc,
                  " test_pr: ", np.array(trn_prc).mean())
| Python |
# === file: tasks/tox21/get_tox_data.py (lvqiujie/Mol2Context-vec) ===
import sys
sys.path.append('./')
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import os
# NR-AR NR-AR-LBD NR-AhR NR-Aromatase
# NR-ER NR-ER-LBD NR-PPAR-gamma SR-ARE
# SR-ATAD5 SR-HSE SR-MMP SR-p53
# Tox21 task name -> column index in the label matrix
dict_label = {"NR-AR":0,
              "NR-AR-LBD":1,
              "NR-AhR":2,
              "NR-Aromatase":3,
              "NR-ER":4,
              "NR-ER-LBD":5,
              "NR-PPAR-gamma":6,
              "SR-ARE":7,
              "SR-ATAD5":8,
              "SR-HSE":9,
              "SR-MMP":10,
              "SR-p53":11,}
# step 1: extract SMILES (column 13) and the 12 task labels from the raw csv
filepath="tox/tox21.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
all_label = []
all_smi = []
w_file = open("tox/tox.smi", mode='w',encoding="utf-8")
for line in df.values:
    smi = line[13].strip()
    if len(smi) <= 0:
        break
    all_label.append(line[:12])
    all_smi.append(smi)
    w_file.write(smi+"\n")
w_file.close()
# step 2: run mol2vec (external CLI) to build the substructure corpus
adb = "mol2vec corpus -i tox/tox.smi -o tox/tox.cp -r 1 -j 4 --uncommon UNK --threshold 3"
d = os.popen(adb)
f = d.read()
print(f)
# step 3: map corpus tokens to vocabulary ids, fixed length sentence_maxlen
vocab_path = "data/datasets/my_smi/smi_tran.vocab"
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("tox/tox_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
index = -1
mols_path = "tox/tox.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    index += 1
    # skip molecules mol2vec failed on ("None") or mapped entirely to UNK
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    # readline() returns '' at EOF
    if not line:
        break
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        # print(token)
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    # print(token_ids)
    # keep label/SMILES aligned with the surviving molecules
    label.append(all_label[index])
    smi.append(all_smi[index])
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
joblib.dump(label, "tox/label.pkl")
joblib.dump(smi, "tox/smi.pkl")
# step 4: run the pretrained context_vec encoder over the token ids
import os
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
config = tf.ConfigProto()
config.gpu_options.allow_growth = False
sess = tf.Session(config=config)
keras.backend.set_session(sess)
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    'cuDNN': False,
    'test_dataset': "tox/tox_tran.cp_UNK",
    'vocab': 'my_smi/smi_tran.vocab',
    'model_dir': "smi_context_vec_best",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    # 'vocab_size': 121,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 512,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': False,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 300,
    'hidden_units_size': 150,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join("data/datasets", parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# context_vec_model.load(sampled_softmax=False)
#
# # Evaluate Bidirectional Language Model
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
#
# # Build context_vec meta-model to deploy for production and persist in disk
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# persist the embeddings ("x") for the downstream training scripts
joblib.dump(context_vec_embeddings, "tox/tox_embed.pkl")
| Python |
# === file: tasks/lipop/train2.py (lvqiujie/Mol2Context-vec) ===
from rdkit import Chem
import torch
import os
import torch.nn as nn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
# from paper_data.plot_morgan import main
import numpy as np
import seaborn as sns
import math
import pickle
import random
from rdkit.Chem import MolFromSmiles
from AttentiveFP.Featurizer import *
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.optim as optim
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from AttentiveFP import Fingerprint, Fingerprint_viz, save_smiles_dicts, get_smiles_dicts, get_smiles_array, moltosvg_highlight
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# --- AttentiveFP hyper-parameters ---
p_dropout = 0.2
fingerprint_dim = 200
# also known as l2_regularization_lambda
weight_decay = 5
learning_rate = 2.5
# for regression model
output_units_num = 1
radius = 2  # presumably the number of message-passing rounds — TODO confirm against AttentiveFP
T = 2  # presumably the number of molecule-level attention steps — TODO confirm
smilesList = ['CC']
degrees = [0, 1, 2, 3, 4, 5]  # atom degrees handled by the graph builder below
class MolGraph(object):
    """Molecular graph holding typed node lists ('atom', 'bond', 'molecule')."""

    def __init__(self):
        # node lists keyed by node type (and later by (ntype, degree) tuples)
        self.nodes = {}

    def new_node(self, ntype, features=None, rdkit_ix=None):
        """Create a node of the given type, register it and return it."""
        node = Node(ntype, features, rdkit_ix)
        bucket = self.nodes.setdefault(ntype, [])
        bucket.append(node)
        return node

    def add_subgraph(self, subgraph):
        """Merge every node list of *subgraph* into this graph."""
        mine, theirs = self.nodes, subgraph.nodes
        for ntype in set(mine) | set(theirs):
            mine.setdefault(ntype, []).extend(theirs.get(ntype, []))

    def sort_nodes_by_degree(self, ntype):
        """Reorder nodes of *ntype* by same-type neighbor count, and also
        store one list per degree under the key (ntype, degree)."""
        by_degree = {d: [] for d in degrees}
        for node in self.nodes[ntype]:
            by_degree[len(node.get_neighbors(ntype))].append(node)
        ordered = []
        for d in degrees:
            self.nodes[(ntype, d)] = by_degree[d]
            ordered.extend(by_degree[d])
        self.nodes[ntype] = ordered

    def feature_array(self, ntype):
        """Stack the feature vectors of all *ntype* nodes into one array."""
        assert ntype in self.nodes
        return np.array([n.features for n in self.nodes[ntype]])

    def rdkit_ix_array(self):
        """RDKit atom indices of the atom nodes, in registration order."""
        return np.array([n.rdkit_ix for n in self.nodes['atom']])

    def neighbor_list(self, self_ntype, neighbor_ntype):
        """For each *self_ntype* node, the indices of its *neighbor_ntype*
        neighbors within that neighbor list."""
        assert self_ntype in self.nodes and neighbor_ntype in self.nodes
        index_of = {n: i for i, n in enumerate(self.nodes[neighbor_ntype])}
        result = []
        for node in self.nodes[self_ntype]:
            result.append([index_of[nb] for nb in node.get_neighbors(neighbor_ntype)])
        return result
class Node(object):
    """A typed graph node carrying a feature vector and its RDKit index."""
    __slots__ = ['ntype', 'features', '_neighbors', 'rdkit_ix']

    def __init__(self, ntype, features, rdkit_ix):
        self.ntype = ntype
        self.features = features
        self._neighbors = []
        self.rdkit_ix = rdkit_ix

    def add_neighbors(self, neighbor_list):
        # Each edge is registered symmetrically on both endpoints.
        for other in neighbor_list:
            self._neighbors.append(other)
            other._neighbors.append(self)

    def get_neighbors(self, ntype):
        """All registered neighbours whose type equals `ntype`."""
        matches = []
        for candidate in self._neighbors:
            if candidate.ntype == ntype:
                matches.append(candidate)
        return matches
class memoize(object):
    """Cache a function's results keyed by its positional arguments.

    Works both as a plain function decorator and on methods: ``__get__``
    binds the instance so it becomes the first cached argument.
    """
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        # EAFP: one dict lookup instead of `in` + getitem.
        try:
            return self.cache[args]
        except KeyError:
            result = self.func(*args)
            self.cache[args] = result
            return result

    def __get__(self, obj, objtype):
        # Bug fix: `partial` was never imported anywhere visible in this
        # module, so decorating a method raised NameError; import it here.
        from functools import partial
        return partial(self.__call__, obj)
def graph_from_smiles(smiles):
    """Build a MolGraph (atom, bond and molecule nodes) from a SMILES string.

    Raises ValueError when RDKit cannot parse the string.
    """
    mol = MolFromSmiles(smiles)
    if not mol:
        raise ValueError("Could not parse SMILES string:", smiles)
    graph = MolGraph()
    rd_idx_to_node = {}
    # One 'atom' node per RDKit atom, remembering its original index.
    for atom in mol.GetAtoms():
        idx = atom.GetIdx()
        rd_idx_to_node[idx] = graph.new_node(
            'atom', features=atom_features(atom), rdkit_ix=idx)
    # One 'bond' node per bond, wired to both endpoint atoms; the atoms
    # are also made direct neighbours of each other.
    for bond in mol.GetBonds():
        begin = rd_idx_to_node[bond.GetBeginAtom().GetIdx()]
        end = rd_idx_to_node[bond.GetEndAtom().GetIdx()]
        bond_node = graph.new_node('bond', features=bond_features(bond))
        bond_node.add_neighbors((begin, end))
        begin.add_neighbors((end,))
    # A single 'molecule' node connected to every atom.
    mol_node = graph.new_node('molecule')
    mol_node.add_neighbors(graph.nodes['atom'])
    return graph
def array_rep_from_smiles(molgraph):
    """Precompute everything we need from MolGraph so that we can free the memory asap."""
    degrees = [0, 1, 2, 3, 4, 5]
    rep = {
        'atom_features': molgraph.feature_array('atom'),
        'bond_features': molgraph.feature_array('bond'),
        'atom_list': molgraph.neighbor_list('molecule', 'atom'),
        'rdkit_ix': molgraph.rdkit_ix_array(),
    }
    # Per-degree neighbour index tables (atoms bucketed by degree).
    for deg in degrees:
        rep[('atom_neighbors', deg)] = np.array(
            molgraph.neighbor_list(('atom', deg), 'atom'), dtype=int)
        rep[('bond_neighbors', deg)] = np.array(
            molgraph.neighbor_list(('atom', deg), 'bond'), dtype=int)
    return rep
def gen_descriptor_data(smilesList):
    """Canonicalise each SMILES and build its padded-array representation.

    Returns a dict mapping canonical SMILES -> array representation.
    Molecules whose featurisation fails are skipped (best effort), which
    matches the original behaviour.
    """
    smiles_to_fingerprint_array = {}
    for smiles in smilesList:
        # Canonicalisation happens outside the try block, as before: an
        # unparseable SMILES still raises here rather than being skipped.
        smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True)
        try:
            molgraph = graph_from_smiles(smiles)
            molgraph.sort_nodes_by_degree('atom')
            smiles_to_fingerprint_array[smiles] = array_rep_from_smiles(molgraph)
        except Exception:
            # Bug fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort skip.
            print(smiles,"%%%%%%%%")
    return smiles_to_fingerprint_array
def save_smiles_dicts(smilesList, filename):
    """Featurise every SMILES, zero-pad all arrays to a common size, and
    pickle the resulting lookup dicts to ``filename + '.pickle'``.

    Returns a dict of dicts keyed by canonical SMILES: padded atom/bond
    feature matrices, per-atom neighbour index tables, atom masks and
    each molecule's original RDKit atom-index order.
    """
    # first need to get the max atom length
    max_atom_len = 0
    max_bond_len = 0
    num_atom_features = 0
    num_bond_features = 0
    smiles_to_rdkit_list = {}
    smiles_to_fingerprint_features = gen_descriptor_data(smilesList)
    # First pass: find the largest atom/bond counts over the whole set.
    for smiles, arrayrep in smiles_to_fingerprint_features.items():
        atom_features = arrayrep['atom_features']
        bond_features = arrayrep['bond_features']
        rdkit_list = arrayrep['rdkit_ix']
        smiles_to_rdkit_list[smiles] = rdkit_list
        atom_len, num_atom_features = atom_features.shape
        bond_len, num_bond_features = bond_features.shape
        if atom_len > max_atom_len:
            max_atom_len = atom_len
        if bond_len > max_bond_len:
            max_bond_len = bond_len
    # then add 1 so I can zero pad everything
    # The extra padded row's index marks "no neighbour" in the tables below.
    max_atom_index_num = max_atom_len
    max_bond_index_num = max_bond_len
    max_atom_len += 1
    max_bond_len += 1
    smiles_to_atom_info = {}
    smiles_to_bond_info = {}
    smiles_to_atom_neighbors = {}
    smiles_to_bond_neighbors = {}
    smiles_to_atom_mask = {}
    degrees = [0, 1, 2, 3, 4, 5]
    # then run through our numpy array again
    # Second pass: pad every molecule's arrays up to the global maxima.
    for smiles, arrayrep in smiles_to_fingerprint_features.items():
        # mask[i] == 1.0 for real atoms, 0.0 for padding rows.
        mask = np.zeros((max_atom_len))
        # get the basic info of what
        # my atoms and bonds are initialized
        atoms = np.zeros((max_atom_len, num_atom_features))
        bonds = np.zeros((max_bond_len, num_bond_features))
        # then get the arrays initlialized for the neighbors
        atom_neighbors = np.zeros((max_atom_len, len(degrees)))
        bond_neighbors = np.zeros((max_atom_len, len(degrees)))
        # now set these all to the last element of the list, which is zero padded
        atom_neighbors.fill(max_atom_index_num)
        bond_neighbors.fill(max_bond_index_num)
        atom_features = arrayrep['atom_features']
        bond_features = arrayrep['bond_features']
        for i, feature in enumerate(atom_features):
            mask[i] = 1.0
            atoms[i] = feature
        for j, feature in enumerate(bond_features):
            bonds[j] = feature
        # Atoms arrive bucketed by degree (sort_nodes_by_degree), so the
        # running counters below walk the rows in that same order.
        atom_neighbor_count = 0
        bond_neighbor_count = 0
        working_atom_list = []
        working_bond_list = []
        for degree in degrees:
            atom_neighbors_list = arrayrep[('atom_neighbors', degree)]
            bond_neighbors_list = arrayrep[('bond_neighbors', degree)]
            if len(atom_neighbors_list) > 0:
                for i, degree_array in enumerate(atom_neighbors_list):
                    for j, value in enumerate(degree_array):
                        atom_neighbors[atom_neighbor_count, j] = value
                    atom_neighbor_count += 1
            if len(bond_neighbors_list) > 0:
                for i, degree_array in enumerate(bond_neighbors_list):
                    for j, value in enumerate(degree_array):
                        bond_neighbors[bond_neighbor_count, j] = value
                    bond_neighbor_count += 1
        # then add everything to my arrays
        smiles_to_atom_info[smiles] = atoms
        smiles_to_bond_info[smiles] = bonds
        smiles_to_atom_neighbors[smiles] = atom_neighbors
        smiles_to_bond_neighbors[smiles] = bond_neighbors
        smiles_to_atom_mask[smiles] = mask
    # Free the raw per-molecule representations before pickling.
    del smiles_to_fingerprint_features
    feature_dicts = {}
    # feature_dicts['smiles_to_atom_mask'] = smiles_to_atom_mask
    # feature_dicts['smiles_to_atom_info']= smiles_to_atom_info
    feature_dicts = {
        'smiles_to_atom_mask': smiles_to_atom_mask,
        'smiles_to_atom_info': smiles_to_atom_info,
        'smiles_to_bond_info': smiles_to_bond_info,
        'smiles_to_atom_neighbors': smiles_to_atom_neighbors,
        'smiles_to_bond_neighbors': smiles_to_bond_neighbors,
        'smiles_to_rdkit_list': smiles_to_rdkit_list
    }
    pickle.dump(feature_dicts, open(filename + '.pickle', "wb"))
    print('feature dicts file saved as ' + filename + '.pickle')
    return feature_dicts
def split_data(x, y, all_smi, lens, k_fold):
    """Return train/val/test splits of (x, y, smiles, lengths) for fold `k_fold`.

    A shuffled 5-fold split is computed; each held-out fold is halved into
    validation (first half) and test (second half).  As a side effect the
    indices of every fold are dumped to ``lipop/<i>-fold-index.pkl``.
    """
    y = np.array(y, dtype=np.float64)
    all_smi = np.array(all_smi)
    lens = np.array(lens)
    # Fixes vs. the original:
    #  * KFold(4, True, 100) produced only 4 folds while 5 index files were
    #    dumped (the 5th stayed empty); 5 splits match the 5 slots below and
    #    the sibling implementation in the companion script.
    #  * shuffle/random_state are keyword-only in modern scikit-learn.
    kf = KFold(n_splits=5, shuffle=True, random_state=100)
    train_index = [[] for _ in range(5)]
    val_index = [[] for _ in range(5)]
    test_index = [[] for _ in range(5)]
    for k, (train_tmp, test_tmp) in enumerate(kf.split(x)):
        train_index[k].extend(train_tmp)
        # Halve the held-out fold: first half validation, second half test.
        num_t = int(len(test_tmp) / 2)
        val_index[k].extend(test_tmp[0:num_t])
        test_index[k].extend(test_tmp[num_t:])
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, 'lipop/' + str(i + 1) + '-fold-index.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    train_split_lens = lens[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    val_split_lens = lens[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    test_split_lens = lens[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi, train_split_lens,\
           val_split_x, val_split_y, val_split_smi, val_split_lens,\
           test_split_x, test_split_y, test_split_smi, test_split_lens
def get_smiles_array(smilesList, feature_dicts):
    """Look up the padded feature arrays for a batch of SMILES strings.

    Returns (atoms, bonds, atom_neighbors, bond_neighbors, masks,
    smiles_to_rdkit_list), the first five stacked as numpy arrays.
    """
    masks, atoms, bonds = [], [], []
    atom_nbrs, bond_nbrs = [], []
    for smi in smilesList:
        masks.append(feature_dicts['smiles_to_atom_mask'][smi])
        atoms.append(feature_dicts['smiles_to_atom_info'][smi])
        bonds.append(feature_dicts['smiles_to_bond_info'][smi])
        atom_nbrs.append(feature_dicts['smiles_to_atom_neighbors'][smi])
        bond_nbrs.append(feature_dicts['smiles_to_bond_neighbors'][smi])
    return (np.asarray(atoms), np.asarray(bonds), np.asarray(atom_nbrs),
            np.asarray(bond_nbrs), np.asarray(masks),
            feature_dicts['smiles_to_rdkit_list'])
class Fingerprint(nn.Module):
    """Attentive-FP style graph network.

    Atom embeddings are refined over `radius` rounds of neighbourhood
    attention; a molecule embedding is then refined over `T` rounds of
    atom->molecule attention and mapped to `output_units_num` outputs.
    forward() returns (atom_feature, mol_prediction, mol_feature).

    NOTE(review): this local definition shadows the Fingerprint imported
    from the AttentiveFP package at the top of the file.
    """

    def __init__(self, radius, T, input_feature_dim, input_bond_dim,
                 fingerprint_dim, output_units_num, p_dropout):
        super(Fingerprint, self).__init__()
        # graph attention for atom embedding (one layer per radius step)
        self.atom_fc = nn.Linear(input_feature_dim, fingerprint_dim)
        self.neighbor_fc = nn.Linear(input_feature_dim + input_bond_dim, fingerprint_dim)
        self.GRUCell = nn.ModuleList([nn.GRUCell(fingerprint_dim, fingerprint_dim) for r in range(radius)])
        self.align = nn.ModuleList([nn.Linear(2 * fingerprint_dim, 1) for r in range(radius)])
        self.attend = nn.ModuleList([nn.Linear(fingerprint_dim, fingerprint_dim) for r in range(radius)])
        # graph attention for molecule embedding (shared across the T steps)
        self.mol_GRUCell = nn.GRUCell(fingerprint_dim, fingerprint_dim)
        self.mol_align = nn.Linear(2 * fingerprint_dim, 1)
        self.mol_attend = nn.Linear(fingerprint_dim, fingerprint_dim)
        self.dropout = nn.Dropout(p=p_dropout)
        self.output = nn.Linear(fingerprint_dim, output_units_num)
        self.radius = radius
        self.T = T

    def forward(self, atom_list, bond_list, atom_degree_list, bond_degree_list, atom_mask):
        """atom_list: (B, L, atom_feat); bond_list: (B, L, bond_feat);
        atom_degree_list / bond_degree_list: (B, L, max_degree) integer
        indices where index L-1 marks a padding neighbour;
        atom_mask: (B, L) with entries in {0, 1}."""
        atom_mask = atom_mask.unsqueeze(2)
        batch_size, mol_length, num_atom_feat = atom_list.size()
        atom_feature = F.leaky_relu(self.atom_fc(atom_list))
        # Gather each atom's neighbouring bond/atom features per molecule.
        bond_neighbor = [bond_list[i][bond_degree_list[i]] for i in range(batch_size)]
        bond_neighbor = torch.stack(bond_neighbor, dim=0)
        atom_neighbor = [atom_list[i][atom_degree_list[i]] for i in range(batch_size)]
        atom_neighbor = torch.stack(atom_neighbor, dim=0)
        # then concatenate them
        neighbor_feature = torch.cat([atom_neighbor, bond_neighbor], dim=-1)
        neighbor_feature = F.leaky_relu(self.neighbor_fc(neighbor_feature))
        # Masks eliminating the influence of padding neighbours.
        # Bug fix: the original cast with torch.cuda.FloatTensor, which
        # crashes on CPU-only machines; boolean masks cast with .float()
        # stay on the input's device and are numerically identical.
        attend_mask = (atom_degree_list != mol_length - 1).float().unsqueeze(-1)
        # -9e8 makes the softmax value of padding slots extremely small.
        softmax_mask = (atom_degree_list == mol_length - 1).float().unsqueeze(-1) * -9e8
        batch_size, mol_length, max_neighbor_num, fingerprint_dim = neighbor_feature.shape
        atom_feature_expand = atom_feature.unsqueeze(-2).expand(batch_size, mol_length, max_neighbor_num,
                                                                fingerprint_dim)
        # First round of neighbourhood attention + GRU update.
        feature_align = torch.cat([atom_feature_expand, neighbor_feature], dim=-1)
        align_score = F.leaky_relu(self.align[0](self.dropout(feature_align)))
        align_score = align_score + softmax_mask
        attention_weight = F.softmax(align_score, -2)
        attention_weight = attention_weight * attend_mask
        neighbor_feature_transform = self.attend[0](self.dropout(neighbor_feature))
        context = torch.sum(torch.mul(attention_weight, neighbor_feature_transform), -2)
        context = F.elu(context)
        context_reshape = context.view(batch_size * mol_length, fingerprint_dim)
        atom_feature_reshape = atom_feature.view(batch_size * mol_length, fingerprint_dim)
        atom_feature_reshape = self.GRUCell[0](context_reshape, atom_feature_reshape)
        atom_feature = atom_feature_reshape.view(batch_size, mol_length, fingerprint_dim)
        # do nonlinearity
        activated_features = F.relu(atom_feature)
        # Remaining radius-1 rounds of neighbourhood attention.
        for d in range(self.radius - 1):
            neighbor_feature = [activated_features[i][atom_degree_list[i]] for i in range(batch_size)]
            neighbor_feature = torch.stack(neighbor_feature, dim=0)
            atom_feature_expand = activated_features.unsqueeze(-2).expand(batch_size, mol_length, max_neighbor_num,
                                                                          fingerprint_dim)
            feature_align = torch.cat([atom_feature_expand, neighbor_feature], dim=-1)
            align_score = F.leaky_relu(self.align[d + 1](self.dropout(feature_align)))
            align_score = align_score + softmax_mask
            attention_weight = F.softmax(align_score, -2)
            attention_weight = attention_weight * attend_mask
            neighbor_feature_transform = self.attend[d + 1](self.dropout(neighbor_feature))
            context = torch.sum(torch.mul(attention_weight, neighbor_feature_transform), -2)
            context = F.elu(context)
            context_reshape = context.view(batch_size * mol_length, fingerprint_dim)
            # The GRU hidden state carries over from the previous round.
            atom_feature_reshape = self.GRUCell[d + 1](context_reshape, atom_feature_reshape)
            atom_feature = atom_feature_reshape.view(batch_size, mol_length, fingerprint_dim)
            # do nonlinearity
            activated_features = F.relu(atom_feature)
        # Initial molecule embedding: masked sum over atoms.
        mol_feature = torch.sum(activated_features * atom_mask, dim=-2)
        # do nonlinearity
        activated_features_mol = F.relu(mol_feature)
        # Softmax mask hiding padding atoms at the molecule level
        # (atom_mask entries are exactly 0 or 1 by construction).
        mol_softmax_mask = (atom_mask == 0).float() * -9e8
        for t in range(self.T):
            mol_prediction_expand = activated_features_mol.unsqueeze(-2).expand(batch_size, mol_length, fingerprint_dim)
            mol_align = torch.cat([mol_prediction_expand, activated_features], dim=-1)
            mol_align_score = F.leaky_relu(self.mol_align(mol_align))
            mol_align_score = mol_align_score + mol_softmax_mask
            mol_attention_weight = F.softmax(mol_align_score, -2)
            mol_attention_weight = mol_attention_weight * atom_mask
            activated_features_transform = self.mol_attend(self.dropout(activated_features))
            # aggregate embeddings of atoms in a molecule
            mol_context = torch.sum(torch.mul(mol_attention_weight, activated_features_transform), -2)
            mol_context = F.elu(mol_context)
            mol_feature = self.mol_GRUCell(mol_context, mol_feature)
            # do nonlinearity
            activated_features_mol = F.relu(mol_feature)
        mol_prediction = self.output(self.dropout(mol_feature))
        return atom_feature, mol_prediction, mol_feature
class LSTM(nn.Module):
    """LSTM over pre-computed context embeddings, fused with the
    AttentiveFP molecule embedding produced by `model`.

    NOTE(review): forward() reads the module-level global `feature_dicts`
    (defined in the __main__ block); confirm it is initialised before any
    forward pass.
    """
    def __init__(self, model):
        super(LSTM, self).__init__()
        # Learnable mixing weights for the three embedding channels.
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.model = model  # the AttentiveFP Fingerprint module
        self.fc = nn.Linear(600, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            batch_first=True,)
        self.fc3 = nn.Linear(1024, 512)
        # Head input: 512 LSTM summary features ++ 200 molecule features.
        self.fc4 = nn.Linear(512 + 200, 1)
        self.dropout = nn.Dropout(p=0.3)

    def forward(self, x, x_lens, tmp_smi):
        """x: (B, 3, seq, 600) stacked context embeddings; x_lens is kept
        for interface compatibility (unused); tmp_smi: batch of SMILES."""
        x = x.to(device)
        # Weighted sum of the three embedding channels.
        x = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        x = self.fc(x)
        out, (hidden, cell) = self.lstm(x)  # hidden/cell: final LSTM state
        x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
                                                                                                    feature_dicts)
        # Bug fix: torch.cuda.LongTensor crashes on CPU-only machines even
        # though `device` falls back to CPU; build tensors device-agnostically.
        atoms_prediction, mol_prediction, mol_feature = self.model(torch.Tensor(x_atom).to(device),
                                                                   torch.Tensor(x_bonds).to(device),
                                                                   torch.LongTensor(x_atom_index).to(device),
                                                                   torch.LongTensor(x_bond_index).to(device),
                                                                   torch.Tensor(x_mask).to(device))
        # Attention outputs are not computed in this variant.
        alpha_n = 0
        att = 0
        # Mean-pool the LSTM outputs over the sequence dimension.
        out = torch.mean(out, dim=1).squeeze()
        # Fully connected head over [LSTM summary ++ molecule embedding].
        out_tmp = self.fc3(out)
        out_tmp = F.leaky_relu(out_tmp)
        out_tmp = self.dropout(out_tmp)
        out_tmp = torch.cat((out_tmp.view(-1, 512), mol_feature.view(-1, 200)), dim=1)
        out_tmp = self.fc4(out_tmp)
        return out_tmp, alpha_n, att
class MyDataset(data.Dataset):
    """Dataset of (embedding, label, SMILES, length) tuples, one per molecule."""
    def __init__(self, compound, y, smi, len):
        # NOTE(review): the parameter name `len` shadows the builtin inside
        # __init__ only; callers pass it positionally, so it is kept as-is.
        super(MyDataset, self).__init__()
        self.compound = compound
        # self.compound = torch.FloatTensor(compound)
        # self.y = torch.FloatTensor(y)
        self.y = y
        self.smi = smi
        self.len = len
    def __getitem__(self, item):
        # One sample: (embedding, label, SMILES string, sequence length).
        return self.compound[item], self.y[item], self.smi[item], self.len[item]
    def __len__(self):
        # The builtin len is intact here (the shadowing was local to __init__).
        return len(self.compound)
if __name__ == '__main__':
    # ---- hyperparameters ----
    input_size = 512
    num_layers = 2  # number of stacked recurrent layers
    hidden_size = 512  # number of recurrent hidden units
    learning_rate = 0.01  # learning rate (overrides the module-level value)
    epoch_num = 1000
    batch_size = 64
    best_loss = 100000
    test_best_loss = 100000
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.04
    seed = 188
    # Seed every RNG source for reproducibility.
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # filepath = "lipop/delaney.csv"
    # df = pd.read_csv(filepath, header=0, encoding="gbk")
    # Pre-computed labels, SMILES strings, context embeddings and lengths.
    y = joblib.load('lipop/label.pkl')
    all_smi = np.array(joblib.load('lipop/smi.pkl'))
    x = joblib.load('lipop/lipop_embed.pkl')
    lens = joblib.load('lipop/lens.pkl')
    # 5-Fold
    train_split_x, train_split_y, train_split_smi, train_split_lens,\
    val_split_x, val_split_y, val_split_smi, val_split_lens,\
    test_split_x, test_split_y, test_split_smi, test_split_lens = split_data(x, y, all_smi, lens, 3)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi, train_split_lens)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi, val_split_lens)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi, test_split_lens)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)
    data_all = MyDataset(x, y, all_smi, lens)
    dataset_all = data.DataLoader(dataset=data_all, batch_size=1, shuffle=True)
    raw_filename = "lipop/Lipophilicity.csv"
    feature_filename = raw_filename.replace('.csv','.pickle')
    filename = raw_filename.replace('.csv','')
    prefix_filename = raw_filename.split('/')[-1].replace('.csv','')
    # NOTE(review): `pd` and `Chem` are not imported explicitly in this file;
    # presumably they arrive via the wildcard Featurizer import — verify.
    smiles_tasks_df = pd.read_csv(raw_filename)
    smilesList = smiles_tasks_df.smiles.values
    print("number of all smiles: ", len(smilesList))
    atom_num_dist = []
    remained_smiles = []
    canonical_smiles_list = []
    # Canonicalise every SMILES, skipping molecules RDKit cannot parse.
    for smiles in smilesList:
        try:
            mol = Chem.MolFromSmiles(smiles)
            atom_num_dist.append(len(mol.GetAtoms()))
            remained_smiles.append(smiles)
            canonical_smiles_list.append(Chem.MolToSmiles(Chem.MolFromSmiles(smiles), isomericSmiles=True))
        except:
            print(smiles,"######3")
            pass
    feature_filename = 'lipop/Lipophilicity'
    # if os.path.isfile(feature_filename):
    #     print("NO lipop/delaney-processed.pickle")
    #     feature_dicts = pickle.load(open(feature_filename, "rb"))
    # else:
    # Build (and pickle) the padded feature dictionaries for all molecules.
    feature_dicts = save_smiles_dicts(smilesList, feature_filename)
    x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(
        [canonical_smiles_list[0]], feature_dicts)
    num_atom_features = x_atom.shape[-1]
    num_bond_features = x_bonds.shape[-1]
    model = Fingerprint(radius, T, num_atom_features, num_bond_features,
                        fingerprint_dim, output_units_num, p_dropout)
    model.to(device)
    rnn = LSTM(model).to(device)
    # Optimise all parameters (rnn wraps model) with SGD; the learning rate
    # is `learning_rate` defined above.  Adam variant left commented out.
    # optimizer = torch.optim.Adam(list(rnn.parameters()), lr=learning_rate)
    optimizer = torch.optim.SGD(list(rnn.parameters()),
                             lr=learning_rate, weight_decay = weight_decay,
                             momentum = momentum)
    loss_function = nn.MSELoss().to(device)
    # ---- training loop: train, validate, checkpoint best, then test ----
    for epoch in range(epoch_num):
        avg_loss = 0
        sum_loss = 0
        rnn.train()
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
            optimizer.zero_grad()
            # x_atom, x_bonds, x_atom_index, x_bond_index, x_mask, smiles_to_rdkit_list = get_smiles_array(tmp_smi,
            #                                                                                              feature_dicts)
            # atoms_prediction, outputs, mol_feature = rnn(torch.Tensor(x_atom).to(device),
            #                                                            torch.Tensor(x_bonds).to(device),
            #                                                            torch.cuda.LongTensor(x_atom_index),
            #                                                            torch.cuda.LongTensor(x_bond_index),
            #                                                            torch.Tensor(x_mask).to(device))
            outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
            # print(matrix1,matrix2,matrix3)
            # print(outputs.flatten())
            loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
            # loss = (loss - b).abs() + b
            loss.backward()
            optimizer.step()
            sum_loss += loss
            # print("epoch:", epoch, "index: ", index,"loss:", loss.item())
        avg_loss = sum_loss / (index + 1)
        print("epoch:", epoch," train " "avg_loss:", avg_loss.item())
        # # save the model on best training loss (disabled)
        # if avg_loss < best_loss:
        #     best_loss = avg_loss
        #     PATH = 'lipop/lstm_net.pth'
        #     print("train save model")
        #     torch.save(rnn.state_dict(), PATH)
        # print(task_matrix[0], task_matrix[1], task_matrix[2])
        with torch.no_grad():
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            # Validation pass.
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
                # print(outputs.flatten())
                loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
                test_sum_loss += loss.item()
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch," val ", "avg_loss: ", test_avg_loss)
            # Save the model whenever validation loss improves.
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                print("test save model")
                torch.save(rnn.state_dict(), 'lipop/lstm_net.pth')
            # Test pass (reported every epoch).
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            all_pred = []
            all_label = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
                loss = 0
                outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device),tmp_smi)
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                all_label.extend(y_label.cpu().numpy())
                all_pred.extend(y_pred.cpu().numpy())
                # y_pred = torch.sigmoid(y_pred).view(-1)
                # y_label = F.one_hot(y_label, 2).float().to(device)
                loss += loss_function(y_pred, y_label)
                test_sum_loss += loss.item()
            mse = mean_squared_error(all_label, all_pred)
            mae = mean_absolute_error(all_label, all_pred)
            rmse = np.sqrt(mse)
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " test avg_loss:", test_avg_loss
                  ," mae : ", mae
                  ," rmse : ", rmse)
            # if rmse < 0.7:
            #     print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
            #
            # rnn.eval()
            # for index, tmp in enumerate(dataset_all):
            #     tmp_compound, tmp_y, tmp_smi, tmp_len = tmp
            #     outputs, alpha_n, att_n = rnn(tmp_compound.to(device), tmp_len.to(device), tmp_smi)
            #     print(outputs.cpu().detach().numpy()[0][0], tmp_y.cpu().detach().numpy()[0], tmp_smi[0])
# ==== second script: tasks/lipop/train.py (Mol2Context-vec) ====
from rdkit import Chem
import torch
import torch.nn as nn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
import torch.utils.data as data
import pandas as pd
from sklearn.externals import joblib
# from paper_data.plot_morgan import main
import numpy as np
import seaborn as sns
import math
import random
from torch.autograd import Variable
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_len(smi):
    """Return the number of atoms in the molecule encoded by `smi`.

    Raises ValueError for unparseable SMILES (the original crashed with an
    AttributeError on the returned None instead).
    """
    mol = Chem.MolFromSmiles(smi)
    if mol is None:
        raise ValueError("Could not parse SMILES string: %s" % smi)
    # The atom count is invariant under the canonicalisation round-trip the
    # original performed (MolToSmiles -> MolFromSmiles), so one parse suffices.
    return mol.GetNumAtoms()
def pack_sequences(X, order=None):
    """Zero-pad a list of 2-D tensors into one (n, max_len, features) block.

    Sequences are placed longest-first unless an explicit `order` (a
    permutation of indices) is supplied; the order actually used is
    returned so it can be undone later with unpack_sequences.
    """
    lengths = np.array([seq.shape[0] for seq in X])
    n_features = X[0].shape[1]
    n_seqs = len(X)
    if order is None:
        # Descending length: longest sequence goes into row 0.
        order = np.argsort(lengths)[::-1]
    max_len = max(lengths)
    X_block = X[0].new(n_seqs, max_len, n_features).zero_()
    for row, src in enumerate(order):
        seq = X[src]
        X_block[row, :len(seq), :] = seq
    return X_block, order
def unpack_sequences(X, order):
    """Undo pack_sequences: pad the packed batch and restore original order."""
    X, lengths = pad_packed_sequence(X, batch_first=True)
    restored = torch.zeros(size=X.size()).to(device)
    # Row i of the packed batch belongs at position order[i].
    for packed_row, original_row in enumerate(order):
        restored[original_row] = X[packed_row]
    return restored
def split_data(x, y, all_smi, k_fold):
    """Return train/val/test splits of (x, y, smiles) for fold `k_fold`.

    A shuffled 5-fold split is computed; each held-out fold is halved into
    validation (first half) and test (second half).  As a side effect the
    indices of every fold are dumped to ``lipop/<i>-fold-index.pkl``.
    """
    y = np.array(y, dtype=np.float64)
    # Fix: shuffle/random_state must be passed as keywords in modern
    # scikit-learn (positional form raises TypeError since 0.24).
    kf = KFold(n_splits=5, shuffle=True, random_state=100)
    train_index = [[] for _ in range(5)]
    val_index = [[] for _ in range(5)]
    test_index = [[] for _ in range(5)]
    for k, (train_tmp, test_tmp) in enumerate(kf.split(x)):
        train_index[k].extend(train_tmp)
        # Halve the held-out fold: first half validation, second half test.
        num_t = int(len(test_tmp) / 2)
        val_index[k].extend(test_tmp[0:num_t])
        test_index[k].extend(test_tmp[num_t:])
    for i in range(5):
        joblib.dump({"train_index": train_index[i],
                     "val_index": val_index[i],
                     "test_index": test_index[i],
                     }, 'lipop/' + str(i + 1) + '-fold-index.pkl')
    train_split_x = x[train_index[k_fold]]
    train_split_y = y[train_index[k_fold]]
    train_split_smi = all_smi[train_index[k_fold]]
    val_split_x = x[val_index[k_fold]]
    val_split_y = y[val_index[k_fold]]
    val_split_smi = all_smi[val_index[k_fold]]
    test_split_x = x[test_index[k_fold]]
    test_split_y = y[test_index[k_fold]]
    test_split_smi = all_smi[test_index[k_fold]]
    return train_split_x, train_split_y, train_split_smi,\
           val_split_x, val_split_y, val_split_smi,\
           test_split_x, test_split_y, test_split_smi
class LSTM(nn.Module):
    """LSTM regression head over pre-computed context embeddings."""
    def __init__(self):
        super(LSTM, self).__init__()
        # Learnable mixing weights for the three embedding channels.
        self.matrix = nn.Parameter(torch.tensor([0.33, 0.33, 0.33]), requires_grad=True)
        self.fc = nn.Linear(600, 1024)
        self.lstm = nn.LSTM(
            input_size=1024,
            hidden_size=1024,
            num_layers=2,
            dropout=0.3,
            batch_first=True)
        # self.fc1 = nn.Linear(512, 1024)
        # self.fc2 = nn.Linear(128, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 1)
        self.dropout = nn.Dropout(p=0.5)
        # self.matrix1 = Variable(torch.tensor(0.33), requires_grad=True)
        # self.matrix2 = Variable(torch.tensor(0.33), requires_grad=True)
        # self.matrix3 = Variable(torch.tensor(0.33), requires_grad=True)
        # self.att_encoder = SelfAttention(350, 1)
        # self.att_dense = nn.Linear(512)
        # self.output_layer = nn.Dense(1)
        # self.bn1 = nn.BatchNorm1d(1024)
        # self.bn2 = nn.BatchNorm1d(256)
        # self.bn3 = nn.BatchNorm1d(128)
    def attention_net(self, x, query, mask=None):
        # NOTE(review): defined but unused — the call site in forward() is
        # commented out.
        d_k = query.size(-1)  # d_k is the dimensionality of the query
        # query:[batch, seq_len, hidden_dim*2], x.t:[batch, hidden_dim*2, seq_len]
        # print("query: ", query.shape, x.transpose(1, 2).shape)  # torch.Size([128, 38, 128]) torch.Size([128, 128, 38])
        # scoring: scores: [batch, seq_len, seq_len]
        scores = torch.matmul(query, x.transpose(1, 2)) / math.sqrt(d_k)
        # print("score: ", scores.shape)  # torch.Size([128, 38, 38])
        # normalise the scores over the last dimension
        alpha_n = F.softmax(scores, dim=-1)
        # print("alpha_n: ", alpha_n.shape)  # torch.Size([128, 38, 38])
        # weighted sum of x:
        # [batch, seq_len, seq_len]·[batch,seq_len, hidden_dim*2] = [batch,seq_len,hidden_dim*2] -> [batch, hidden_dim*2]
        context = torch.matmul(alpha_n, x).sum(1)
        att = torch.matmul(x, context.unsqueeze(2))/ math.sqrt(d_k)
        att = torch.sigmoid(att.squeeze())
        return context, alpha_n, att
    def forward(self, x):
        # print(self.matrix1, self.matrix2, self.matrix3)
        # bs = len(x)
        # length = np.array([t.shape[0] for t in x])
        x = x.to(device)
        # Weighted sum of the three embedding channels.
        out = self.matrix[0] * x[:, 0, :, :] + self.matrix[1] * x[:, 1, :, :] + self.matrix[2] * x[:, 2, :, :]
        out = self.fc(out.to(device)).to(device)
        # changed_length1 = length[orderD]
        # x = pack_padded_sequence(x, changed_length1, batch_first=True)
        out,(h_n, c_n) = self.lstm(out.to(device))  # h_n/c_n are the final hidden/cell states
        # query = self.dropout(out)
        # #
        # # optional attention mechanism (disabled)
        # attn_output, alpha_n, att = self.attention_net(out, query)
        alpha_n =0
        att =0
        # out,hidden = self.lstm(x.to(device))  # previous hidden state
        # out = torch.cat((h_n[-1, :, :], h_n[-2, :, :]), dim=-1)
        # out1 = unpack_sequences(rnn_out, orderD)
        # for i in range(bs):
        #     out1[i,length[i]:-1,:] = 0
        # Mean-pool over the sequence dimension.
        out = torch.mean(out, dim=1).squeeze()
        # out = out[:,-1,:]
        # fully-connected regression head
        # out = self.fc1(out[:,-1,:])
        # out = F.leaky_relu(out)
        # out = F.dropout(out, p=0.3)
        # out = self.fc2(out)
        # out = F.leaky_relu(out)
        # out = F.dropout(out, p=0.3)
        out = self.fc3(out)
        out = F.leaky_relu(out)
        out = self.dropout(out)
        out = self.fc4(out)
        # return F.softmax(out,dim=-1)
        return out, alpha_n, att
class MyDataset(data.Dataset):
    """Dataset of (embedding, label, SMILES) triples, one per molecule."""

    def __init__(self, compound, y, smi):
        super(MyDataset, self).__init__()
        self.smi = smi
        self.y = y
        self.compound = compound

    def __getitem__(self, item):
        # One sample: (embedding, label, SMILES string).
        return (self.compound[item], self.y[item], self.smi[item])

    def __len__(self):
        return len(self.compound)
if __name__ == '__main__':
    # ---- hyper-parameters ----
    input_size = 512
    num_layers = 2        # number of stacked LSTM layers
    hidden_size = 512     # hidden units per LSTM layer
    learning_rate = 0.01
    epoch_num = 1000
    batch_size = 32
    best_loss = 10000
    test_best_loss = 1000
    weight_decay = 1e-5
    momentum = 0.9
    b = 0.051             # flooding level (only used by the disabled flooding loss)

    # ---- load pre-computed embeddings, labels and SMILES produced by get_data ----
    y = joblib.load('lipop/label.pkl')
    all_smi = np.array(joblib.load('lipop/smi.pkl'))
    x = joblib.load('lipop/lipop_embed.pkl')

    # ---- fix all random seeds for reproducibility ----
    seed = 199
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # ---- train/val/test split (fold index 3 of the 5-fold split) ----
    train_split_x, train_split_y, train_split_smi, \
    val_split_x, val_split_y, val_split_smi, \
    test_split_x, test_split_y, test_split_smi = split_data(x, y, all_smi, 3)
    data_train = MyDataset(train_split_x, train_split_y, train_split_smi)
    dataset_train = data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
    data_val = MyDataset(val_split_x, val_split_y, val_split_smi)
    dataset_val = data.DataLoader(dataset=data_val, batch_size=batch_size, shuffle=True)
    data_test = MyDataset(test_split_x, test_split_y, test_split_smi)
    dataset_test = data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=True)

    rnn = LSTM().to(device)
    # SGD with momentum and weight decay over all model parameters.
    optimizer = torch.optim.SGD(rnn.parameters(),
                                lr=learning_rate, weight_decay=weight_decay,
                                momentum=momentum)
    loss_function = nn.MSELoss().to(device)

    for epoch in range(epoch_num):
        # ---- training ----
        sum_loss = 0
        rnn.train()
        for index, tmp in enumerate(dataset_train):
            tmp_compound, tmp_y, tmp_smi = tmp
            optimizer.zero_grad()
            outputs, alpha_n, att_n = rnn(tmp_compound)
            loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
            loss.backward()
            optimizer.step()
            # Accumulate a detached Python float; accumulating the tensor
            # itself would retain every batch's autograd graph for the epoch.
            sum_loss += loss.item()
        avg_loss = sum_loss / (index + 1)
        print("epoch:", epoch, " train " "avg_loss:", avg_loss)

        with torch.no_grad():
            # ---- validation ----
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            for index, tmp in enumerate(dataset_val):
                tmp_compound, tmp_y, tmp_smi = tmp
                outputs, alpha_n, att_n = rnn(tmp_compound)
                loss = loss_function(outputs.flatten(), tmp_y.type(torch.FloatTensor).to(device))
                test_sum_loss += loss.item()
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " val ", "avg_loss: ", test_avg_loss)
            # Checkpoint on best validation loss.
            if test_avg_loss < test_best_loss:
                test_best_loss = test_avg_loss
                print("test save model")
                torch.save(rnn.state_dict(), 'lipop/lstm_net.pth')

            # ---- test evaluation (optionally dumping attention heatmaps) ----
            att_flag = False  # set True to save per-molecule attention figures
            rnn.eval()
            test_avg_loss = 0
            test_sum_loss = 0
            all_pred = []
            all_label = []
            for index, tmp in enumerate(dataset_test):
                tmp_compound, tmp_y, tmp_smi = tmp
                loss = 0
                outputs, alpha_n, att_n = rnn(tmp_compound)
                if att_flag:
                    att = alpha_n.cpu().detach().numpy()
                    for att_i in range(alpha_n.shape[0]):
                        smi_len = get_len(tmp_smi[att_i])
                        if smi_len > 40:
                            continue  # figures become unreadable for long molecules
                        att_tmp = att[att_i, :smi_len*2, :smi_len*2]
                        # Every other row/column is kept — presumably the
                        # atom-level tokens; TODO confirm against the tokenizer.
                        att_heatmap = att_tmp[1::2, 1::2]
                        # Min-max normalize for plotting.
                        att_heatmap = (att_heatmap - att_heatmap.min()) / (att_heatmap.max() - att_heatmap.min())
                        fig = sns.heatmap(att_heatmap, cmap='OrRd')
                        scatter_fig = fig.get_figure()
                        try:
                            scatter_fig.savefig("lipop/att_img/"+str(tmp_smi[att_i])+".png", dpi=400)
                        except:
                            # SMILES may contain characters invalid in filenames.
                            continue
                        finally:
                            plt.close()
                        att_word_tmp = att_n[att_i, :smi_len*2].cpu().detach().numpy()
                        att_word = att_word_tmp[1::2]
                        a = []
                        # Use a distinct loop variable: the original shadowed the
                        # outer batch `index`, corrupting the loss average below.
                        for word_idx, i in enumerate(att_word):
                            a.append(str(word_idx)+",1,"+str(1-i)+","+str(1-i))
                        main(tmp_smi[att_i], a, [], "lipop/att_word/"+str(tmp_smi[att_i])+".png")
                y_pred = outputs.to(device).view(-1)
                y_label = tmp_y.float().to(device).view(-1)
                all_label.extend(y_label.cpu().numpy())
                all_pred.extend(y_pred.cpu().numpy())
                loss += loss_function(y_pred, y_label)
                test_sum_loss += loss.item()
            mse = mean_squared_error(all_label, all_pred)
            rmse = np.sqrt(mse)
            test_avg_loss = test_sum_loss / (index + 1)
            print("epoch:", epoch, " test avg_loss:", test_avg_loss,
                  " rmse : ", rmse)
| Python |
3D | lvqiujie/Mol2Context-vec | tasks/lipop/get_data.py | .py | 5,704 | 184 |
import pandas as pd
from sklearn.externals import joblib
import numpy as np
from rdkit import Chem
from rdkit.Chem import Descriptors
import os
# step 1: canonicalize every SMILES in the Lipophilicity CSV and dump them to
# lipop/lipop.smi, keeping labels aligned in all_label / all_smi.
filepath = "lipop/Lipophilicity.csv"
df = pd.read_csv(filepath, header=0, encoding="gbk")
w_file = open("lipop/lipop.smi", mode='w', encoding="utf-8")
all_label = []
all_smi = []
for line in df.values:
    # Columns: line[1] = logD label, line[2] = raw SMILES (per the CSV layout
    # this script reads — confirm against Lipophilicity.csv header).
    mol = Chem.MolFromSmiles(line[2])
    if mol is None:
        # Unparseable SMILES: skip this record instead of letting
        # MolToSmiles crash on a None molecule.
        print("invalid SMILES skipped:", line[2])
        continue
    smi = Chem.MolToSmiles(mol, isomericSmiles=True)
    if len(smi) <= 0:
        # An empty canonical SMILES should only skip this record; the
        # original `break` silently truncated the remaining dataset here.
        continue
    all_smi.append(smi)
    all_label.append(line[1])
    w_file.write(smi + "\n")
w_file.close()
# step 2: run mol2vec to build the token corpus for the SMILES written in step 1.
import subprocess

cmd = ["mol2vec", "corpus", "-i", "lipop/lipop.smi", "-o", "lipop/lipop.cp",
       "-r", "1", "-j", "4", "--uncommon", "UNK", "--threshold", "3"]
# subprocess.run with an argument list avoids shell parsing and surfaces a
# failing exit status; os.popen silently ignored command failures.
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stdout)
if result.returncode != 0:
    # Make failures visible instead of letting step 3 read a missing corpus.
    print("mol2vec failed:", result.stderr)
# step 3: convert the mol2vec corpus into fixed-length token-id lines, keeping
# label/smi lists aligned with the corpus lines that survive filtering.
vocab_path = "data/datasets/my_smi/smi_tran.vocab"
# Vocab file format: "<token> <id>" per line -> dict token -> integer id.
vocab = {line.split()[0]: int(line.split()[1]) for line in open(vocab_path).readlines()}
sentence_maxlen = 80
w_file = open("lipop/lipop_tran.cp_UNK", mode='w', encoding="utf-8")
label = []
smi = []
lens = []
index = -1  # index into all_smi/all_label from step 1; advances once per corpus line
mols_path = "lipop/lipop.cp_UNK"
mols_file = open(mols_path, mode='r',encoding="utf-8")
while True:
    line = mols_file.readline().strip()
    words = line.split()
    index += 1
    # Skip molecules mol2vec could not tokenize; index still advances so the
    # remaining records stay aligned with the step-1 lists.
    if "None".__eq__(line.strip()) or "UNK".__eq__(line.strip()):
        continue
    # Empty read means end of file.
    # NOTE(review): a blank line mid-file would also stop the loop — confirm
    # the corpus never contains blank interior lines.
    if not line:
        break
    # Zero-padded sequence of token ids, length sentence_maxlen.
    token_ids = np.zeros((sentence_maxlen,), dtype=np.int64)
    # Add begin of sentence index
    token_ids[0] = vocab['<bos>']
    # Reserve 2 slots for <bos>/<eos>; truncate longer sentences.
    for j, token in enumerate(line.split()[:sentence_maxlen - 2]):
        # print(token)
        if token.lower() in vocab:
            token_ids[j + 1] = vocab[token.lower()]
        else:
            token_ids[j + 1] = vocab['<unk>']
    # Add end of sentence index
    # (placed right after the last real token; `j` comes from the loop above)
    if token_ids[1]:
        token_ids[j + 2] = vocab['<eos>']
    # print(token_ids)
    label.append(all_label[index])
    smi.append(all_smi[index])
    # Raw token count, capped when the padded length would be exceeded.
    # NOTE(review): the literal 80 duplicates sentence_maxlen.
    lens.append(len(words) if len(words) + 2 <= sentence_maxlen else 80)
    w_file.write(" ".join(str(i) for i in token_ids).strip()+"\n")
w_file.close()
# Persist aligned labels, SMILES and lengths for the training script.
joblib.dump(label, 'lipop/label.pkl')
joblib.dump(smi, 'lipop/smi.pkl')
joblib.dump(lens, 'lipop/lens.pkl')
# step 4: run the tokenized corpus through the pre-trained context_vec
# (bi-LSTM language model) encoder and persist per-word embeddings to disk.
import os
import keras.backend as K
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin to the first GPU
from data import DATA_SET_DIR
from context_vec.smi_generator import SMIDataGenerator
from context_vec.smi_model import context_vec
import tensorflow as tf
from tensorflow import keras
from sklearn.externals import joblib
# Let TF grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Model/generator configuration.
# NOTE(review): these values presumably must match the checkpoint in
# 'model_dir' (vocab_size, sentence_maxlen, lstm_units_size) — confirm.
parameters = {
    'multi_processing': False,
    'n_threads': 4,
    'cuDNN': True if len(K.tensorflow_backend._get_available_gpus()) else False,
    'test_dataset': 'lipop/lipop_tran.cp_UNK',
    'vocab': 'my_smi/smi_tran.vocab',
    'model_dir': "smi_context_vec_512",
    'vocab_flag': False,
    'uncommon_threshold': 3,
    # 'vocab_size': 28914,
    # 'vocab_size': 748,
    'vocab_size': 13576,
    'num_sampled': 100,
    # 'charset_size': 262,
    'sentence_maxlen': 80,
    'token_maxlen': 50,
    'token_encoding': 'word',
    'epochs': 1000,
    'patience': 2,
    'batch_size': 512,
    'test_batch_size': 512,
    'clip_value': 1,
    'cell_clip': 5,
    'proj_clip': 5,
    'lr': 0.2,
    'shuffle': False,
    'n_lstm_layers': 2,
    'n_highway_layers': 2,
    'cnn_filters': [[1, 32],
                    [2, 32],
                    [3, 64],
                    [4, 128],
                    [5, 256],
                    [6, 512],
                    [7, 512]
                    ],
    'lstm_units_size': 512,
    'hidden_units_size': 300,
    'char_embedding_size': 16,
    'dropout_rate': 0.1,
    'word_dropout_rate': 0.05,
    'weight_tying': True,
}
# Batched reader over the token-id file written in step 3 (shuffle=False keeps
# output rows aligned with label.pkl / smi.pkl).
test_generator = SMIDataGenerator(parameters['test_dataset'],
                                  os.path.join(DATA_SET_DIR, parameters['vocab']),
                                  sentence_maxlen=parameters['sentence_maxlen'],
                                  token_maxlen=parameters['token_maxlen'],
                                  batch_size=parameters['test_batch_size'],
                                  shuffle=parameters['shuffle'],
                                  token_encoding=parameters['token_encoding'])
# Compile context_vec
context_vec_model = context_vec(parameters)
context_vec_model.compile_context_vec()
# context_vec_model.load(sampled_softmax=False)
#
# # Evaluate Bidirectional Language Model
# context_vec_model.evaluate(test_generator, parameters['test_batch_size'])
#
# # Build context_vec meta-model to deploy for production and persist in disk
# context_vec_model.wrap_multi_context_vec_encoder(print_summary=True)
# Load context_vec encoder
context_vec_model.load_context_vec_encoder()
# Get context_vec embeddings to feed as inputs for downstream tasks
context_vec_embeddings = context_vec_model.get_outputs(test_generator, output_type='word', state='all')
print(context_vec_embeddings.shape)
# Persist the embeddings (the `x` loaded by the training script).
joblib.dump(context_vec_embeddings, 'lipop/lipop_embed.pkl')
| Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.