keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
2D | janberges/elphmod | examples/bare/run.sh | .sh | 583 | 25 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using Hartwigsen-Goedecker-Hutter pseudopotentials'
echo '[1] Hartwigsen et al., Phys. Rev. B 58, 3641 (1998)'
echo '[2] Goedecker et al., Phys. Rev. B 54, 1703 (1996)'
url=https://pseudopotentials.quantum-espresso.org/upf_files
for pp in S.pbe-hgh.UPF Ta.pbe-hgh.UPF
do
test -e $pp || wget $url/$pp
done
nk=2
mpirun pw.x -nk $nk < pw.in | tee pw.out
mpirun ph.x -nk $nk < ph.in | tee ph.out
mpirun python3 bare.py
| Shell |
2D | janberges/elphmod | examples/projwfc/projwfc.py | .py | 1,349 | 52 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import matplotlib.pyplot as plt
colors = ['red', 'blue', 'black']
labels = ['$s$', '$p_{x, y}$', '$p_z$']
x, k, eps, proj = elphmod.el.read_atomic_projections(
'work/graphene.save/atomic_proj.xml', order=True)
eps *= elphmod.misc.Ry
orbitals = elphmod.el.read_projwfc_out('projwfc.out')
width = 0.5 * elphmod.el.proj_sum(proj, orbitals, 's', 'p{x, y}', 'pz')
pwi = elphmod.bravais.read_pwi('scf.in')
mu = elphmod.el.read_Fermi_level('scf.out')
path = 'GMKG'
K, X, corners = elphmod.bravais.path(path, N=100, **pwi)
X *= x[-1] / X[-1]
el = elphmod.el.Model('graphene')
E, order = elphmod.dispersion.dispersion(el.H, K, order=True)
E -= mu
if elphmod.MPI.comm.rank != 0:
raise SystemExit
plt.ylabel('Electron energy (eV)')
plt.xlabel('Wave vector')
plt.xticks(X[corners], path)
for n in range(eps.shape[1]):
fatbands = elphmod.plot.compline(x, eps[:, n], width[:, n, :])
for fatband, color, label in zip(fatbands, colors, labels):
plt.fill(*fatband, color=color, linewidth=0.0,
label=None if n else label)
for n in range(el.size):
plt.plot(X, E[:, n], 'y:', label=None if n else 'W90')
plt.legend()
plt.savefig('projwfc.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/projwfc/run.sh | .sh | 798 | 29 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'
url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard
pp=C.upf
test -e $pp || (wget $url/$pp.gz && gunzip $pp)
nk=2
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun pw.x -nk $nk < bands.in | tee bands.out
mpirun projwfc.x -nk $nk < projwfc.in | tee projwfc.out
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun -n 1 wannier90.x -pp graphene
mpirun pw2wannier90.x < pw2w90.in | tee pw2w90.out
mpirun -n 1 wannier90.x graphene
mpirun python3 projwfc.py
| Shell |
2D | janberges/elphmod | examples/phrenorm/run.sh | .sh | 933 | 42 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using Hartwigsen-Goedecker-Hutter pseudopotentials'
echo '[1] Hartwigsen et al., Phys. Rev. B 58, 3641 (1998)'
echo '[2] Goedecker et al., Phys. Rev. B 54, 1703 (1996)'
url=https://pseudopotentials.quantum-espresso.org/upf_files
for pp in S.pbe-hgh.UPF Ta.pbe-hgh.UPF
do
test -e $pp || wget $url/$pp
done
nk=2
mpirun pw.x -nk $nk < scf.in | tee scf.out
for method in dfpt cdfpt
do
mpirun ph.x -nk $nk < $method.in | tee $method.out
fildyn=$method.dyn dvscf_dir=$method.save ph2epw
done
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
for method in dfpt cdfpt
do
mpirun -n $nk epw.x -nk $nk < epw-$method.in | tee epw-$method.out
mv work/TaS2.epmatwp $method.epmatwp
done
mpirun python3 phrenorm.py
mpirun python3 defpot.py
mpirun python3 decay.py
| Shell |
2D | janberges/elphmod | examples/phrenorm/decay.py | .py | 690 | 26 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
# Based on code by Arne Schobert.
import elphmod
import matplotlib.pyplot as plt
pwi = elphmod.bravais.read_pwi('scf.in')
R1, H1 = elphmod.el.read_decayH('decay.H')
R2, H2 = elphmod.el.decayH('TaS2', **pwi)
if elphmod.MPI.comm.rank == 0:
plt.plot(R1, H1, 'o', color='blue', markersize=10, label='EPW output')
plt.plot(R2, H2, 'o', color='orange',
label='calculated from Wannier90 data')
plt.ylabel('Hopping (eV)')
plt.xlabel(r'Distance ($\mathrm{\AA}$)')
plt.legend()
plt.savefig('decay.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/phrenorm/phrenorm.py | .py | 2,914 | 118 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import copy
import elphmod
import matplotlib.pyplot as plt
import numpy as np
comm = elphmod.MPI.comm
info = elphmod.MPI.info
PW = elphmod.bravais.read_pwi('scf.in')
PH = elphmod.bravais.read_ph('dfpt.in')
kT = PW['degauss'] * elphmod.misc.Ry
f = elphmod.occupations.smearing(**PW)
nk = PW['k_points'][0]
nq = PH['nq1']
nel = 1
info('Prepare wave vectors')
q = sorted(elphmod.bravais.irreducibles(nq))
q = 2 * np.pi * np.array(q, dtype=float) / nq
path = 'GMKG'
k, x, corners = elphmod.bravais.path(path, N=340, **PW)
info('Prepare electrons')
el = elphmod.el.Model('TaS2')
mu = elphmod.el.read_Fermi_level('scf.out')
e, U = elphmod.dispersion.dispersion_full_nosym(el.H, nk, vectors=True)
e = e[..., :nel] - mu
U = U[..., :nel]
info('Prepare phonons')
ph = dict()
for method in 'cdfpt', 'dfpt':
ph[method] = elphmod.ph.Model('%s.dyn' % method, apply_asr_simple=True,
lr=False)
info('Prepare electron-phonon coupling')
g = dict()
for method in sorted(ph):
elph = elphmod.elph.Model('%s.epmatwp' % method, 'wigner.fmt',
el, ph[method])
g[method] = elph.sample(q, U=U) * elphmod.misc.Ry ** 1.5
info('Calculate phonon self-energy')
Pi = elphmod.diagrams.phonon_self_energy(q, e, g=g['cdfpt'], G=g['dfpt'],
kT=kT, occupations=f) / elphmod.misc.Ry ** 2
info('Renormalize phonons')
D = elphmod.dispersion.sample(ph['cdfpt'].D, q)
ph['pp'] = copy.copy(ph['dfpt'])
elphmod.ph.q2r(ph['pp'], D + Pi, q, nq, apply_asr_simple=True)
info('Plot electrons')
e, U, order = elphmod.dispersion.dispersion(el.H, k, vectors=True, order=True)
e -= mu
number = iter(range(1, 99))
if comm.rank == 0:
proj = 0.1 * abs(U) ** 2
for n in range(el.size):
fatbands = elphmod.plot.compline(x, e[:, n], proj[:, :, n])
for fatband, color in zip(fatbands, 'rgb'):
plt.fill(*fatband, color=color, linewidth=0.0)
plt.ylabel('Electron energy (eV)')
plt.xlabel('Wave vector')
plt.xticks(x[corners], path)
plt.savefig('phrenorm_%d.png' % next(number))
plt.show()
info('Plot cDFPT, DFPT and renormalized phonons')
for method in sorted(ph):
w2, u, order = elphmod.dispersion.dispersion(ph[method].D, k,
vectors=True, order=True)
w = elphmod.ph.sgnsqrt(w2) * elphmod.misc.Ry * 1e3
if comm.rank == 0:
proj = elphmod.ph.polarization(u, k)
for nu in range(ph[method].size):
fatbands = elphmod.plot.compline(x, w[:, nu], proj[:, nu])
for fatband, color in zip(fatbands, 'ymk'):
plt.fill(*fatband, color=color, linewidth=0.0)
plt.ylabel('Phonon energy (meV)')
plt.xlabel('Wave vector')
plt.xticks(x[corners], path)
plt.savefig('phrenorm_%d.png' % next(number))
plt.show()
| Python |
2D | janberges/elphmod | examples/phrenorm/defpot.py | .py | 971 | 37 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import matplotlib.pyplot as plt
import numpy as np
nu = 8 # ionic displacement
a = 0 # electronic orbital
el = elphmod.el.Model('TaS2')
ph = elphmod.ph.Model('dfpt.dyn', apply_asr_simple=True)
elph = elphmod.elph.Model('dfpt.epmatwp', 'wigner.fmt', el, ph,
divide_mass=False)
path = 'GMKG'
k, x, corners = elphmod.bravais.path(path, ibrav=4)
g = np.empty(len(k), dtype=complex)
for ik, (k1, k2, k3) in enumerate(k):
g[ik] = elph.g(k1=k1, k2=k2, k3=k3)[nu, a, a]
g *= elphmod.misc.Ry / elphmod.misc.a0
if elphmod.MPI.comm.rank == 0:
plt.ylabel(r'$\langle \vec k d_{z^2}| '
r'\partial V / \partial z_{\mathrm{S}} '
r'|\vec k d_{z^2} \rangle$ '
r'($\mathrm{eV/\AA}$)')
plt.xticks(x[corners], path)
plt.plot(x, g.real)
plt.savefig('defpot.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/quadrupole/quadrupole.py | .py | 4,282 | 183 | #!/usr/bin/env python3
import elphmod
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
comm = elphmod.MPI.comm
info = elphmod.MPI.info
info('Load tight-binding, mass-spring, and coupling models')
el = elphmod.el.Model('TaS2')
ph = elphmod.ph.Model('dyn', lr=False)
elph = elphmod.elph.Model('work/TaS2.epmatwp', 'wigner.fmt', el, ph)
elph.sample_orig()
info('Set up q-point path')
path = 'GM'
q, x, corners = elphmod.bravais.path(path, ibrav=4, N=198, moveG=1e-2)
q0, x0, w0 = elphmod.el.read_bands('dynref.freq')
q0 = 2 * np.pi * np.dot(q0, ph.a.T) / np.linalg.norm(ph.a[0])
x0 += x[-1] - x0[-1]
info('Load reference data')
sqrtM = np.sqrt(np.repeat(ph.M, 3))
D0 = np.empty((len(q0), ph.size, ph.size), dtype=complex)
for iq in range(len(q0)):
D0[iq] = elphmod.ph.read_flfrc('dynref%d' % (iq + 1))[0][1][0]
D0 /= sqrtM[np.newaxis, np.newaxis, :]
D0 /= sqrtM[np.newaxis, :, np.newaxis]
g0 = np.empty((len(q0), ph.size), dtype=complex)
iq = 0
with open('phref.out') as lines:
for line in lines:
if 'Printing the electron-phonon matrix elements' in line:
next(lines)
next(lines)
for line in lines:
cols = line.split()
if not cols:
break
i, m, n, k1, k2, k3 = tuple(map(int, cols[:6]))
if m == n == 13 and k1 == k2 == k3 == 0:
g0[iq, i - 1] = float(cols[6]) + 1j * float(cols[7])
iq += 1
g0 /= sqrtM[np.newaxis, :]
info('Optimize long-range separation parameter')
ph.lr = True
def objective(L):
ph.L, = L
ph.update_short_range()
return ph.sum_force_constants()
scipy.optimize.minimize(objective, [1.0], tol=0.1)
elph.update_short_range()
info('Interpolate dynamical matrix and coupling for Q = 0')
def sample(q):
D = elphmod.dispersion.sample(ph.D, q)
g = elphmod.dispersion.sample(elph.g, q, elbnd=True,
comm=elphmod.MPI.I)[:, :, 0, 0]
return D, g
Dd, gd = sample(q)
# Fix typo in user-facing progress message ("Optimze" -> "Optimize").
info('Optimize quadrupole tensors')
def error():
D, g = sample(q0)
dD = (abs(D - D0) ** 2).sum()
dg = (abs(abs(g) - abs(g0)) ** 2).sum()
return dD, dg
dD0, dg0 = error()
def objective(Q):
ph.Q = np.zeros((ph.nat, 3, 3, 3))
ph.Q[1, 1, 1, 1] = Q[0] # Ta y y y
ph.Q[2, 1, 1, 1] = Q[1] # S y y y
ph.Q[2, 2, 1, 1] = Q[2] # S z y y
ph.Q[:, 0, 0, 1] = ph.Q[:, 0, 1, 0] = ph.Q[:, 1, 0, 0] = -ph.Q[:, 1, 1, 1]
ph.Q[:, 2, 0, 0] = ph.Q[:, 2, 1, 1]
ph.Q[0, :2] = ph.Q[2, :2]
ph.Q[0, 2] = -ph.Q[2, 2]
ph.Q[Q == 0.0] = 0.0 # avoid negative zeros
ph.update_short_range()
elph.update_short_range()
dD, dg = error()
dD /= dD0
dg /= dg0
info('error(D) = %.10g%%' % (100 * dD))
info('error(g) = %.10g%%' % (100 * dg))
return np.sqrt(dD ** 2 + dg ** 2)
scipy.optimize.minimize(objective, np.ones(3), tol=1e-3)
info('Interpolate dynamical matrix and coupling for optimal Q')
Dq, gq = sample(q)
info('Plot results')
if comm.rank != 0:
raise SystemExit
elphmod.ph.write_quadrupole_fmt('quadrupole.fmt', ph.Q)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
wd2, ud = np.linalg.eigh(Dd)
wq2, uq = np.linalg.eigh(Dq)
w02, u0 = np.linalg.eigh(D0)
ax1.plot(x, elphmod.ph.sgnsqrt(wd2) * 1e3 * elphmod.misc.Ry, 'r')
ax1.plot(x, elphmod.ph.sgnsqrt(wq2) * 1e3 * elphmod.misc.Ry, 'b')
ax1.plot(x0, elphmod.ph.sgnsqrt(w02) * 1e3 * elphmod.misc.Ry, 'ko')
gd = np.einsum('qx,qxv->qv', gd, ud)
gq = np.einsum('qx,qxv->qv', gq, uq)
g0 = np.einsum('qx,qxv->qv', g0, u0)
gd = np.sort(abs(gd), axis=1)
gq = np.sort(abs(gq), axis=1)
g0 = np.sort(abs(g0), axis=1)
for nu in range(ph.size):
ax2.plot(x, gd[:, nu] * elphmod.misc.Ry ** 1.5, 'r',
label=None if nu else '$Z^*$ only')
for nu in range(ph.size):
ax2.plot(x, gq[:, nu] * elphmod.misc.Ry ** 1.5, 'b',
label=None if nu else '$Z^*$ and $Q$')
for nu in range(ph.size):
ax2.plot(x0, g0[:, nu] * elphmod.misc.Ry ** 1.5, 'ko',
label=None if nu else 'reference')
ax1.set_ylabel('Phonon energy (meV)')
ax2.set_ylabel('Electron-phonon coupling (eV$^{3/2}$)')
ax2.set_xlabel('Wave vector')
ax2.set_xticks(x[corners])
ax2.set_xticklabels(path)
ax2.legend()
plt.savefig('quadrupole.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/quadrupole/run.sh | .sh | 803 | 33 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'
url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard # [1, 2]
for pp in Ta.upf S.upf
do
test -e $pp || (wget $url/$pp.gz && gunzip $pp)
done
nk=2
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun ph.x -nk $nk < ph.in | tee ph.out
ph2epw
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun -n $nk epw.x -nk $nk < epw.in | tee epw.out
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun ph.x -nk $nk < phref.in | tee phref.out
mpirun python3 quadrupole.py
| Shell |
2D | janberges/elphmod | examples/fluctuations/run.sh | .sh | 732 | 31 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using Hartwigsen-Goedecker-Hutter pseudopotentials'
echo '[1] Hartwigsen et al., Phys. Rev. B 58, 3641 (1998)'
echo '[2] Goedecker et al., Phys. Rev. B 54, 1703 (1996)'
url=https://pseudopotentials.quantum-espresso.org/upf_files
for pp in S.pbe-hgh.UPF Ta.pbe-hgh.UPF
do
test -e $pp || wget $url/$pp
done
nk=2
mpirun pw.x -nk $nk < pw.in | tee pw.out
mpirun ph.x -nk $nk < ph.in | tee ph.out
mpirun q2r.x < q2r.in | tee q2r.out
ph2epw
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun -n $nk epw.x -nk $nk < epw.in | tee epw.out
mpirun python3 fluctuations.py
| Shell |
2D | janberges/elphmod | examples/fluctuations/fluctuations.py | .py | 2,363 | 76 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import matplotlib.pyplot as plt
import numpy as np
comm = elphmod.MPI.comm
info = elphmod.MPI.info
q = np.array([[0.0, 2 * np.pi / 3]])
nk = 48
kT = 0.005
BZ = dict(points=200, outside=np.nan)
info('Load tight-binding model, mass-spring model, and coupling')
el = elphmod.el.Model('TaS2')
mu = elphmod.el.read_Fermi_level('pw.out')
ph = elphmod.ph.Model('ifc', apply_asr_simple=True)
elph = elphmod.elph.Model('work/TaS2.epmatwp', 'wigner.fmt', el, ph)
info('Diagonalize Hamiltonian and dynamical matrix and sample coupling')
e, U = elphmod.dispersion.dispersion_full_nosym(el.H, nk, vectors=True)
e -= mu
e /= elphmod.misc.Ry
w2, u = elphmod.dispersion.dispersion(ph.D, q, vectors=True)
LA = np.argwhere(elphmod.ph.polarization(u, q)[0, :, 0] > 0.5).min()
g2 = abs(elph.sample(q, U=U[..., :1], u=u[..., LA:LA + 1])) ** 2
info('Calculate phonon self-energy and bare electronic susceptibility')
Pi = elphmod.diagrams.phonon_self_energy(q, e[..., :1], g2=g2, kT=kT,
occupations=elphmod.occupations.fermi_dirac, fluctuations=True)[1]
X0 = elphmod.diagrams.phonon_self_energy(q, e[..., :1], kT=kT,
occupations=elphmod.occupations.fermi_dirac, fluctuations=True)[1]
info('Map all quantities onto first Brillouin zone')
ek1_BZ = elphmod.plot.toBZ(e[:, :, 0], **BZ)
ek2_BZ = elphmod.plot.toBZ(np.roll(np.roll(e[:, :, 0],
shift=-int(round(q[0, 0] * nk / (2 * np.pi))), axis=0),
shift=-int(round(q[0, 1] * nk / (2 * np.pi))), axis=1), **BZ)
Pi_BZ = -elphmod.plot.toBZ(Pi[0, 0, :, :, 0, 0], **BZ)
X0_BZ = -elphmod.plot.toBZ(X0[0, 0, :, :, 0, 0], **BZ)
g2_BZ = +elphmod.plot.toBZ(g2[0, 0, :, :, 0, 0], **BZ)
info('Plot self-energy, susceptibility, and coupling next to each other')
if comm.rank == 0:
figure, axes = plt.subplots(1, 3)
for n, (title, data) in enumerate([(r'$-2 \omega \Pi$', Pi_BZ),
(r'$-\chi_0$', X0_BZ), (r'$2 \omega g^2$', g2_BZ)]):
axes[n].imshow(data)
axes[n].contour(ek1_BZ, levels=[0.0], colors='k')
axes[n].contour(ek2_BZ, levels=[0.0], colors='k', linestyles=':')
axes[n].set_title(title)
axes[n].axis('image')
axes[n].axis('off')
plt.savefig('fluctuations.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/goldstone/run.sh | .sh | 730 | 26 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'
url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard
pp=N.upf
test -e $pp || (wget $url/$pp.gz && gunzip $pp)
mpirun pw.x < pw.in | tee pw.out
for method in dfpt cdfpt
do
mpirun ph.x -ndiag 1 < $method.in | tee $method.out
echo "&INPUT fildyn='$method.dyn' flfrc='$method.ifc' /" | mpirun -n 1 q2r.x
cp -rT work/_ph0/N2.phsave $method.phsave
done
mpirun python3 goldstone.py
| Shell |
2D | janberges/elphmod | examples/goldstone/goldstone.py | .py | 2,257 | 85 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import matplotlib.patches as pts
import matplotlib.pyplot as plt
import numpy as np
colors = ['dodgerblue', 'orange']
labels = ['$x, y$', '$z$']
e = elphmod.el.read_pwo('pw.out')[0][0]
nel = len(e)
e = e.reshape((1, 1, nel))
ph = dict()
d = dict()
for method in 'cdfpt', 'dfpt':
ph[method] = elphmod.ph.Model(method + '.ifc')
ph[method].data *= elphmod.misc.Ry ** 2
nph = ph[method].size
d[method] = elphmod.elph.read_xml_files(method + '.phsave/elph.%d.%d.xml',
q=1, rep=nph, bands=range(nel), nbands=nel, nk=1, status=False)
d[method] *= elphmod.misc.Ry ** 1.5
ph['dfpt_asr'] = elphmod.ph.Model('dfpt.ifc', apply_asr=True)
ph['dfpt_asr'].data *= elphmod.misc.Ry ** 2
ph['dfpt_rsr'] = elphmod.ph.Model('dfpt.ifc', apply_rsr=True)
ph['dfpt_rsr'].data *= elphmod.misc.Ry ** 2
q = np.array([[0, 0]])
Pi = elphmod.diagrams.phonon_self_energy(q, e, g=d['cdfpt'], G=d['dfpt'],
occupations=elphmod.occupations.heaviside).reshape((nph, nph))
if elphmod.MPI.comm.rank != 0:
raise SystemExit
D = [
ph['dfpt'].D(),
ph['cdfpt'].D(),
ph['cdfpt'].D() + Pi / ph['dfpt'].M[0],
ph['dfpt_asr'].D(),
ph['dfpt_rsr'].D(),
]
Shorten = 0.1
shorten = 0.01
width = 5.0
plt.axhline(0.0, color='gray', zorder=0)
for n in range(len(D)):
X1 = n - 0.5 + Shorten
X2 = n + 0.5 - Shorten
w2, u = np.linalg.eigh(D[n])
w = elphmod.ph.sgnsqrt(w2) * 1e3
Z = (abs(u[2::3]) ** 2).sum(axis=0) > 0.5
for group in elphmod.misc.group(w, eps=1.1 * width):
N = len(group)
for i, nu in enumerate(group):
x1 = (i * X2 + (N - i) * X1) / N + shorten; i += 1
x2 = (i * X2 + (N - i) * X1) / N - shorten
plt.fill_between([x1, x2], w[nu] - 0.5 * width, w[nu] + 0.5 * width,
linewidth=0.0, color=colors[int(Z[nu])])
plt.ylabel('Phonon energy (meV)')
plt.xticks(range(len(D)),
['DFPT', 'cDFPT', r'cDFPT+$\Pi$', 'DFPT+ASR', 'DFPT+RSR'])
plt.legend(handles=[pts.Patch(color=color, label=label)
for color, label in zip(colors, labels)])
plt.savefig('goldstone.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/projwfc_3d/projwfc_3d.py | .py | 1,091 | 42 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import matplotlib.pyplot as plt
colors = ['red', 'blue', 'green', 'gray']
labels = ['$s$', '$p$', '$d$', 'other']
x, k, eps, proj = elphmod.el.read_atomic_projections(
'work/polonium.save/atomic_proj.xml', order=False, other=True)
eps *= elphmod.misc.Ry
orbitals = elphmod.el.read_projwfc_out('projwfc.out')
width = 0.5 * elphmod.el.proj_sum(proj, orbitals, 's', 'p', 'd', 'x')
if elphmod.MPI.comm.rank != 0:
raise SystemExit
path = 'GXRMG'
K, X, corners = elphmod.bravais.path(path, ibrav=1)
X *= x[-1] / X[-1]
plt.ylabel('Electron energy (eV)')
plt.xlabel('Wave vector')
plt.xticks(X[corners], path)
for n in range(eps.shape[1]):
fatbands = elphmod.plot.compline(x, eps[:, n], width[:, n, :])
for fatband, color, label in zip(fatbands, colors, labels):
plt.fill(*fatband, color=color, linewidth=0.0,
label=None if n else label)
plt.legend()
plt.savefig('projwfc_3d.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/projwfc_3d/run.sh | .sh | 632 | 23 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'
url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard
pp=Po.upf
test -e $pp || (wget $url/$pp.gz && gunzip $pp)
nk=2
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun projwfc.x -nk $nk < projwfc.in | tee projwfc.out
mpirun python3 projwfc_3d.py
| Shell |
2D | janberges/elphmod | examples/ph_vs_epw/run.sh | .sh | 686 | 30 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
eval `elphmodenv`
echo 'Using Hartwigsen-Goedecker-Hutter pseudopotentials'
echo '[1] Hartwigsen et al., Phys. Rev. B 58, 3641 (1998)'
echo '[2] Goedecker et al., Phys. Rev. B 54, 1703 (1996)'
url=https://pseudopotentials.quantum-espresso.org/upf_files
for pp in S.pbe-hgh.UPF Ta.pbe-hgh.UPF
do
test -e $pp || wget $url/$pp
done
nk=2
mpirun pw.x -nk $nk < pw.in | tee pw.out
mpirun ph.x -nk $nk < ph.in | tee ph.out
ph2epw
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun -n $nk epw.x -nk $nk < epw.in | tee epw.out
python3 ph_vs_epw.py
| Shell |
2D | janberges/elphmod | examples/ph_vs_epw/ph_vs_epw.py | .py | 1,691 | 57 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import numpy as np
info = elphmod.MPI.info
pwi = elphmod.bravais.read_pwi('pw.in')
nk = np.array(pwi['k_points'][:3])
a = elphmod.bravais.primitives(**pwi)
a /= np.linalg.norm(a[0])
b = elphmod.bravais.reciprocals(*a)
el = elphmod.el.Model('TaS2')
ph = elphmod.ph.Model('dyn')
elph = elphmod.elph.Model('work/TaS2.epmatwp', 'wigner.fmt', el, ph,
divide_mass=False)
with open('ph.out') as lines:
for line in lines:
if 'Calculation of q' in line:
q = np.array([float(c) for c in line.split()[-3:]])[np.newaxis]
info('\nq = (%g, %g, %g)\n' % tuple(q[0]))
q, = 2 * np.pi * elphmod.bravais.cartesian_to_crystal(q, *b)
info('%2s %2s %2s %2s %2s %2s %9s %9s'
% ('i', 'm', 'n', 'k1', 'k2', 'k3', 'd (PH)', 'd (EPW)'))
elif 'Printing the electron-phonon matrix elements' in line:
next(lines)
next(lines)
for line in lines:
cols = line.split()
if not cols:
break
i, m, n = tuple(int(c) - 1 for c in cols[:3])
k1, k2, k3 = tuple(map(int, cols[3:6]))
k = 2 * np.pi / nk * np.array([k1, k2, k3])
if m == n == 8:
g_ph = float(cols[6]) + 1j * float(cols[7])
g_epw = elph.g(*q, *k, elbnd=True, phbnd=False)[i, 0, 0]
info('%2d %2d %2d %2d %2d %2d %9.6f %9.6f'
% (i, m, n, k1, k2, k3, abs(g_ph), abs(g_epw)))
| Python |
2D | janberges/elphmod | examples/modes/modes_modules.py | .py | 3,589 | 118 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
"""
Created on Sun Mar 14 15:15:43 2021
@author: arne
"""
import numpy as np
import matplotlib.pyplot as plt
def theta(v, w):
    """Return the angle between vectors v and w in degrees.

    The cosine is clipped to [-1, 1] so that floating-point round-off
    cannot push it outside the domain of arccos, which would otherwise
    yield NaN for (anti)parallel vectors.
    """
    cosine = v.dot(w) / (np.linalg.norm(v) * np.linalg.norm(w))
    return np.degrees(np.arccos(np.clip(cosine, -1.0, 1.0)))
def supercell_vectors(cdw_data, N1, N2, A, a, a1, a2, a3):
    """Construct the real-space supercell (CDW) lattice vectors A1, A2, A3.

    The construction depends on the Bravais lattice index ``ibrav`` read
    from the Quantum ESPRESSO input (``cdw_data``):

    * ``ibrav == 4`` (hexagonal): A1 = N1 * a1; A2 is searched as an
      integer combination n * a1 + m * a2 whose length is N2 * a and
      whose angle to A1 equals the angle between a1 and a2.
    * ``ibrav == 8`` (orthorhombic): same idea with a 90-degree angle;
      if N1 is not an integer, candidate vectors are only printed and
      plotted instead (diagnostic path).
    * ``ibrav == 0``: the vectors are taken directly from the cell
      matrix ``r_cell`` scaled by the lattice parameter A.

    NOTE(review): if the brute-force search finds no match, A2 is never
    assigned and the return raises NameError — presumably the caller
    always supplies commensurate N1, N2; confirm before reuse.
    NOTE(review): in the non-integer-N1 diagnostic path, A1 and A2 are
    also unassigned at the final return — verify this path is only used
    interactively.
    """
    if cdw_data['ibrav'] == 4:
        # Target angle between the supercell vectors (same as unit cell).
        angle = theta(a1, a2)
        # Lattice vectors of CDW structure:
        A1 = a1 * N1
        A3 = a3
        eps = 1e-5
        # Brute-force search over integer combinations of a1 and a2.
        for n in range(50):
            for m in range(50):
                test_lattice_vector = n * a1 + m * a2
                if abs(np.linalg.norm(test_lattice_vector) - N2 * a) < eps:
                    if abs(theta(test_lattice_vector, A1) - angle) < eps:
                        A2 = n * a1 + m * a2
        return A1, A2, A3
    elif cdw_data['ibrav'] == 8:
        # Orthorhombic cell: supercell vectors are perpendicular.
        angle = 90
        if not isinstance(N1, int):
            # Diagnostic path: only report/plot candidate vectors.
            print('N1 is not an integer')
            eps = 1e-8
            for n in range(-50, 50):
                for m in range(-50, 50):
                    test_lattice_vector = n * a1 + m * a2
                    if abs(np.linalg.norm(test_lattice_vector) - N1 * a) < eps:
                        print(n, m)
                        plt.plot([0, test_lattice_vector[0]],
                            [0, test_lattice_vector[1]],
                            color='black', linewidth=5)
        else:
            # Lattice vectors of CDW structure:
            A1 = N1 * a1
            A3 = a3
            eps = 1e-5
            for n in range(50):
                for m in range(50):
                    test_lattice_vector = n * a1 + m * a2
                    if abs(np.linalg.norm(test_lattice_vector) - N2 * a) < eps:
                        if abs(theta(test_lattice_vector, A1) - angle) < eps:
                            A2 = n * a1 + m * a2
        return A1, A2, A3
    elif cdw_data['ibrav'] == 0:
        # Free cell: read the three cell vectors directly.
        v1 = cdw_data['r_cell'][0, :]
        v2 = cdw_data['r_cell'][1, :]
        v3 = cdw_data['r_cell'][2, :]
        # Calculate angle of supercell vectors
        angle = theta(a1, a2)
        A1 = A * v1
        A2 = A * v2
        A3 = A * v3
        return A1, A2, A3
def permutation_finder(nat, R_cdw, R_sym, at_cdw, at_sym, eps):
    """Reorder the CDW atoms to match the symmetric reference structure.

    For each atom of the symmetric structure, the first CDW atom within
    a distance ``eps`` is selected (previously, *every* matching atom
    was appended, which could overflow the permutation and crash the
    fill loop below for near-degenerate positions).

    Returns the permuted positions array and the correspondingly
    permuted list of atom labels.
    """
    permutation = []
    for atom_index_sym in range(nat):
        for atom_index_cdw in range(nat):
            test_position = R_cdw[atom_index_cdw]
            if np.linalg.norm(test_position - R_sym[atom_index_sym]) < eps:
                permutation.append(atom_index_cdw)
                break  # accept only the first match per symmetric atom
    # Apply the permutation to positions and labels.
    R_cdw_permuted = np.empty((nat, 3))
    at_cdw_permuted = []
    for index, cdw_index in enumerate(permutation):
        R_cdw_permuted[index] = R_cdw[cdw_index]
        at_cdw_permuted.append(at_cdw[cdw_index])
    return R_cdw_permuted, at_cdw_permuted
def align_structures(nat, R_cdw, R_sym, A1, A2, eps):
    """Translate CDW atoms by supercell lattice vectors onto SYM sites.

    Each CDW atom is tried with all nine combinations of -A1/0/+A1 and
    -A2/0/+A2; if a translated position lies within ``eps`` of any atom
    of the symmetric structure, the CDW atom is moved there (R_cdw is
    modified in place and also returned).

    The dead no-op ``eps = eps`` of the original was removed; behavior
    is otherwise unchanged.
    """
    for atom_index_sym in range(nat):
        for atom_index_cdw in range(nat):
            for m in [-1, 0, 1]:
                for n in [-1, 0, 1]:
                    test_position = R_cdw[atom_index_cdw] + m * A1 + n * A2
                    if (np.linalg.norm(test_position - R_sym[atom_index_sym])
                            < eps):
                        R_cdw[atom_index_cdw] = test_position
    return R_cdw
| Python |
2D | janberges/elphmod | examples/modes/run.sh | .sh | 158 | 7 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
mpirun python3 modes.py
| Shell |
2D | janberges/elphmod | examples/modes/modes.py | .py | 7,098 | 237 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
"""
Created on Sun Mar 14 15:15:43 2021
@author: arne
"""
import elphmod
import re
import numpy as np
import sys
from modes_modules import (supercell_vectors, permutation_finder,
align_structures)
import matplotlib.pyplot as plt
comm = elphmod.MPI.comm
symmetrize = True
if comm.rank == 0:
file = open('info.dat', 'w')
material = 'NbSe2'
N1 = 3
N2 = 3
cdw_path = '%s_3x3_CDW.in' % (material)
transition_metal = 'Nb'
chalcogen = 'Se'
if comm.rank == 0:
file.write('Charge-density-wave structure: %d x %d\n' % (N1, N2))
# Load symmetric structure
cdw_data = elphmod.bravais.read_pwi(cdw_path)
R_cdw = cdw_data['r']
nat = cdw_data['nat']
at_cdw = cdw_data['at']
# Load lattice parameters
A = cdw_data['a']
C = cdw_data['c']
a = A / N1
#alat = a / elphmod.misc.a0
if comm.rank == 0:
file.write('Lattice parameter of the unit cell a = %3.12f\n' % a)
# Load lattice translations
a1, a2 = elphmod.bravais.translations(two_dimensional=False)
a1 *= a
a2 *= a
a3 = np.array([0.0, 0.0, C])
# Load real space supercell vectors
A1, A2, A3 = supercell_vectors(cdw_data, N1, N2, A, a, a1, a2, a3)
# Load reciprocal space supercell vectors
B1, B2, B3 = elphmod.bravais.reciprocals(A1, A2, A3)
# Check coordinate type of input
coords_input = cdw_data['coords']
coords_type_QE = ['crystal', 'bohr', 'angstrom', 'alat']
flag_coords_type = False
for coords_type in coords_type_QE:
if re.search(coords_type, coords_input, re.IGNORECASE):
if comm.rank == 0:
file.write('Coordinates are given in %s\n' % coords_type)
flag_coords_type = True
if coords_type == 'crystal':
# Transform from crystal to cartesian coordinates
R_cdw = elphmod.bravais.crystal_to_cartesian(R_cdw, A1, A2, A3)
if not flag_coords_type:
if comm.rank == 0:
file.write('Did not find coordinate type in input file. '
'Stopping program...\n')
sys.exit()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Load mass-spring model and setup symmetric crystal structure:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
ph = elphmod.ph.Model('../data/NbSe2_DFPT.ifc', apply_asr_simple=True)
# cartesian coordinates (angstrom)
tau = ph.r * elphmod.misc.a0
# Set up sym. atomic positions from the IFC file:
R_sym = np.empty((int(round(N1)), int(round(N2)), ph.nat, 3))
for n1 in range(int(round(N1))):
for n2 in range(int(round(N2))):
R_sym[n1, n2] = a1 * n1 + a2 * n2 + tau
at_sym = []
for index in range(int(round(N1)) * int(round(N2))):
for ityp in range(3):
at_sym.append(ph.atom_order[ityp])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Shift and align structures:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
R_sym = R_sym.reshape(R_cdw.shape)
if symmetrize:
# Fold structures into supercell
R_cdw = elphmod.bravais.cartesian_to_crystal(R_cdw, A1, A2, A3)
R_sym = elphmod.bravais.cartesian_to_crystal(R_sym, A1, A2, A3)
R_cdw[:, 0] %= 1
R_cdw[:, 1] %= 1
R_sym[:, 0] %= 1
R_sym[:, 1] %= 1
R_cdw = elphmod.bravais.crystal_to_cartesian(R_cdw, A1, A2, A3)
R_sym = elphmod.bravais.crystal_to_cartesian(R_sym, A1, A2, A3)
# Align structures
R_cdw = align_structures(nat, R_cdw, R_sym, A1, A2, eps=0.5 * a)
original_atom_index = 1
distance_to_original_uc = np.empty((nat))
for atom_index in range(nat):
if at_cdw[atom_index] != at_sym[original_atom_index]:
distance_to_original_uc[atom_index] = 1e10
else:
distance_to_original_uc[atom_index] = np.linalg.norm(
R_cdw[atom_index] - R_sym[original_atom_index])
shift_index = np.argmin(distance_to_original_uc)
shift_vector = (R_cdw[np.argmin(distance_to_original_uc)]
- R_sym[original_atom_index])
R_cdw -= shift_vector
# Fold structures into supercell
R_cdw = elphmod.bravais.cartesian_to_crystal(R_cdw, A1, A2, A3)
R_sym = elphmod.bravais.cartesian_to_crystal(R_sym, A1, A2, A3)
R_cdw[:, 0] %= 1
R_cdw[:, 1] %= 1
R_sym[:, 0] %= 1
R_sym[:, 1] %= 1
R_cdw = elphmod.bravais.crystal_to_cartesian(R_cdw, A1, A2, A3)
R_sym = elphmod.bravais.crystal_to_cartesian(R_sym, A1, A2, A3)
# Align structures
R_cdw = align_structures(nat, R_cdw, R_sym, A1, A2, eps=0.5 * a)
# Calculate the barycenter
BC_sym = 0.0
BC_cdw = 0.0
for ii in range(nat):
BC_sym += R_sym[ii, :] / nat
BC_cdw += R_cdw[ii, :] / nat
R_cdw = (BC_sym - BC_cdw) + R_cdw
if at_cdw != at_sym:
if comm.rank == 0:
file.write('Atom order does not match. Starting permutation...\n')
R_cdw, at_cdw = permutation_finder(nat, R_cdw, R_sym, at_cdw, at_sym,
eps=0.5 * a)
if at_cdw != at_sym:
if comm.rank == 0:
file.write('Atom order still does not match. Stopping program...\n')
sys.exit()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Calculate some important distances:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Total displacement vector
U_tot = np.sqrt(((R_cdw - R_sym) ** 2).sum())
if comm.rank == 0:
file.write('Total displacement vector (angstrom): U = %3.4f\n' % U_tot)
distance = np.empty((nat))
for atom_index in range(nat):
distance[atom_index] = np.linalg.norm(R_cdw[atom_index] - R_sym[atom_index])
if comm.rank == 0:
file.write('Maximal displacement of %s atom is %1.2f %s\n'
% (at_cdw[np.argmax(distance)], np.max(distance) / a * 100, '%'))
file.close()
# Plot structures
if comm.rank != 0:
raise SystemExit
for atom_index in range(nat):
if at_sym[atom_index] == transition_metal:
plt.plot(R_sym[atom_index, 0], R_sym[atom_index, 1], 'o',
color='cyan', markersize=15)
elif at_sym[atom_index] == chalcogen:
plt.plot(R_sym[atom_index, 0], R_sym[atom_index, 1], 'o',
color='orangered', markersize=15)
for atom_index in range(nat):
if at_cdw[atom_index] == transition_metal:
plt.plot(R_cdw[atom_index, 0], R_cdw[atom_index, 1], 'o',
color='darkblue', markersize=15)
elif at_cdw[atom_index] == chalcogen:
plt.plot(R_cdw[atom_index, 0], R_cdw[atom_index, 1], 'o',
color='gold', markersize=15)
Start_Pos = (0, 0)
plt.plot([0, A1[0]], [0, A1[1]], color='black')
plt.plot([0, A2[0]], [0, A2[1]], color='black')
plt.plot([0, A1[0]], [0, A1[1]], color='black')
plt.plot([0, A2[0]], [0, A2[1]], color='black')
plt.plot([Start_Pos[0] + A2[0], Start_Pos[0] + A2[0] + A1[0]],
[Start_Pos[1] + A2[1], Start_Pos[1] + A2[1] + A1[1]], color='black')
plt.plot([Start_Pos[0] + A1[0], Start_Pos[0] + A2[0] + A1[0]],
[Start_Pos[1] + A1[1], Start_Pos[1] + A2[1] + A1[1]], color='black')
plt.axis('off')
plt.axis('equal')
plt.savefig('modes.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/lr/lr.py | .py | 2,950 | 100 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

"""Compare long-range models for phonons and electron-phonon coupling.

Run with '--prepare-q' first to write the q points and matdyn.x input files;
run without arguments afterwards to create the comparison plots.
"""

import elphmod
import matplotlib.pyplot as plt
import numpy as np
import sys

comm = elphmod.MPI.comm

# High-symmetry path through the Brillouin zone:
path = 'KGM'
q, x, corners = elphmod.bravais.path(path, ibrav=4, N=50, moveG=0.1)

if len(sys.argv) > 1 and sys.argv[1] == '--prepare-q':
    # Preparation step: write the q points (in crystal coordinates) and the
    # matdyn.x input files for both long-range treatments, then exit.
    if comm.rank == 0:
        q /= 2 * np.pi  # convert to crystal coordinates
        weight = 1 / len(q)

        with open('q.dat', 'w') as filqf:
            filqf.write('%d crystal\n' % len(q))

            for q1, q2, q3 in q:
                filqf.write('%12.10f %12.10f %12.10f %12.10f\n'
                    % (q1, q2, q3, weight))

        for lr in '3d', 'gaussian':
            elphmod.bravais.write_matdyn('matdyn_%s.in' % lr, dict(
                flfrc='%s.ifc' % lr,
                flfrq='%s.freq' % lr,
                nq=len(q),
                q=q,
                q_in_cryst_coord=True,
                asr='simple',
                loto_2d=lr != '3d',  # 2D long-range treatment except for '3d'
                fldos=' ',
                fleig=' ',
                flvec=' ',
                ))

    raise SystemExit

# Counter for the output figure filenames (lr_1.png, lr_2.png, ...):
number = iter(range(1, 99))

# Compare phonon dispersions from elphmod and matdyn.x for both long-range
# treatments:
for lr in '3d', 'gaussian':
    ph = elphmod.ph.Model('%s.ifc' % lr, apply_asr_simple=True, apply_zasr=True,
        lr2d=lr != '3d', lr=True)

    if '3d' in lr:
        # Presumably restricts the long-range G sum to in-plane vectors
        # despite the 3D treatment — confirm against elphmod.ph docs.
        ph.prepare_long_range(G_2d=True)

    w = elphmod.ph.sgnsqrt(elphmod.dispersion.dispersion(ph.D, q))

    # Reference frequencies computed by matdyn.x:
    q0, x0, w0 = elphmod.el.read_bands('%s.freq' % lr)

    if comm.rank == 0:
        plt.plot(x, w0.T * 1e3 * elphmod.misc.cmm1, 'ok')
        plt.plot(x, w * 1e3 * elphmod.misc.Ry, '-k')
        plt.title(lr)
        plt.ylabel('Phonon energy (meV)')
        plt.xlabel('Wave vector')
        plt.xticks(x[corners], path)
        plt.savefig('lr_%d.png' % next(number))
        plt.show()

el = elphmod.el.Model('MoS2')

# Compare electron-phonon couplings from elphmod and EPW for all long-range
# treatments:
for lr in 'no_lr', '3d', 'gaussian', 'dipole_sp', 'quadrupole':
    ph = elphmod.ph.Model('dyn', apply_asr_simple=True, apply_zasr=True,
        lr=lr != 'no_lr',
        lr2d=lr != '3d',
        L=elphmod.elph.read_L('epw_%s.out' % lr),
        quadrupole_fmt='_quadrupole.fmt' if lr == 'quadrupole' else None)

    if '3d' in lr:
        ph.prepare_long_range(G_2d=True)

    elph = elphmod.elph.Model('%s.epmatwp' % lr, 'wigner.fmt', el, ph)

    g = np.absolute([elph.g(q1, q2, q3, elbnd=True, phbnd=True)
        for q1, q2, q3 in q])
    g = np.sort(g, axis=1)  # sort along the phonon-mode axis

    # Reference couplings from EPW's "prtgkk" output:
    w0, g0 = elphmod.elph.read_prtgkk('epw_%s.out' % lr,
        nq=len(q), nmodes=ph.size, nk=1, nbnd=el.size)

    if comm.rank == 0:
        plt.plot(x, elphmod.ph.sgnsqrt(2 * w0) * g0[:, :, 0, 0, 0], 'ok')
        plt.plot(x, g[:, :, 0, 0] * (1e3 * elphmod.misc.Ry) ** 1.5, '-k')
        plt.title(lr)
        plt.ylabel('Electron-phonon coupling (meV$^{3 / 2}$)')
        plt.xlabel('Wave vector')
        plt.xticks(x[corners], path)
        plt.savefig('lr_%d.png' % next(number))
        plt.show()
| Python |
2D | janberges/elphmod | examples/lr/run.sh | .sh | 1,110 | 48 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

# Driver script for the long-range (lr) example: runs Quantum ESPRESSO and
# EPW for several long-range treatments and then plots comparisons via lr.py.

eval `elphmodenv`

echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'

url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard

# Download pseudopotentials unless already present:
for pp in Mo.upf S.upf
do
    test -e $pp || (wget $url/$pp.gz && gunzip $pp)
done

nk=2

# Write q points and matdyn.x input files:
python3 lr.py --prepare-q

# SCF and phonon calculations:
mpirun pw.x -nk $nk < pw.in | tee pw.out
mpirun ph.x -nk $nk < ph.in | tee ph.out

# Interpolate phonons with q2r.x/matdyn.x for both long-range treatments:
for lr in '3d' 'gaussian'
do
    mpirun q2r.x < q2r_$lr.in | tee q2r_$lr.out
    mpirun matdyn.x < matdyn_$lr.in | tee matdyn_$lr.out
done

ph2epw

mpirun pw.x -nk $nk < nscf.in | tee nscf.out

# EPW runs for all long-range treatments; the quadrupole file is renamed
# temporarily so that only the 'quadrupole' run picks it up:
for lr in 'no_lr' '3d' 'gaussian' 'dipole_sp' 'quadrupole'
do
    test $lr = 'quadrupole' && mv _quadrupole.fmt quadrupole.fmt
    mpirun -n $nk epw.x -nk $nk < epw_$lr.in | tee epw_$lr.out
    test $lr = 'quadrupole' && mv quadrupole.fmt _quadrupole.fmt
    mv work/MoS2.epmatwp $lr.epmatwp
done

# Create the comparison plots:
mpirun python3 lr.py
| Shell |
2D | janberges/elphmod | examples/phrenorm_3d/phrenorm_3d.py | .py | 2,661 | 109 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

"""Renormalize cDFPT phonons of a 3D system with the phonon self-energy."""

import copy
import elphmod
import matplotlib.pyplot as plt
import numpy as np

comm = elphmod.MPI.comm
info = elphmod.MPI.info

# Read calculation parameters from the Quantum ESPRESSO input files:
PW = elphmod.bravais.read_pwi('scf.in')
PH = elphmod.bravais.read_ph('dfpt.in')

kT = PW['degauss'] * elphmod.misc.Ry  # smearing width in eV
f = elphmod.occupations.smearing(**PW)  # smearing (step) function

nk = PW['k_points'][:3]  # k mesh of the SCF calculation
nq = [PH['nq1'], PH['nq2'], PH['nq3']]  # q mesh of the phonon calculation

info('Prepare wave vectors')

k = elphmod.bravais.mesh(*nk)
q = elphmod.bravais.mesh(*nq)
q_flat = np.reshape(q, (-1, 3))

path = 'GXRMG'
q_path, x, corners = elphmod.bravais.path(path, N=100, moveG=1e-3, **PW)

info('Prepare electrons')

el = elphmod.el.Model('polonium')
mu = elphmod.el.read_Fermi_level('scf.out')

e, U = elphmod.dispersion.dispersion(el.H, k, vectors=True)
e -= mu  # measure energies from the Fermi level

info('Prepare phonons')

ph = dict()
for method in 'cdfpt', 'dfpt':
    ph[method] = elphmod.ph.Model('%s.dyn' % method, apply_asr_simple=True,
        lr=False)

info('Prepare electron-phonon coupling')

# NOTE(review): the cDFPT phonon model is used for both couplings here —
# confirm this is intended (the graphene example uses ph[method] instead).
g = dict()
for method in sorted(ph):
    elph = elphmod.elph.Model('%s.epmatwp' % method, 'wigner.fmt',
        el, ph['cdfpt'])
    g[method] = elph.sample(q_flat, U=U) * elphmod.misc.Ry ** 1.5

info('Calculate phonon self-energy')

# Self-energy with one cDFPT and one DFPT coupling vertex:
Pi = elphmod.diagrams.phonon_self_energy(q_flat, e, g=g['cdfpt'], G=g['dfpt'],
    kT=kT, occupations=f)
Pi = np.reshape(Pi, (nq[0], nq[1], nq[2], ph['cdfpt'].size, ph['cdfpt'].size))
Pi /= elphmod.misc.Ry ** 2  # convert from eV^2 to Ry^2

info('Renormalize phonons')

D = elphmod.dispersion.sample(ph['cdfpt'].D, q)

# Add the self-energy to the cDFPT dynamical matrix and transform the result
# back to real space, reusing the DFPT model as a template:
ph['cdfpt+pi'] = copy.copy(ph['dfpt'])
elphmod.ph.q2r(ph['cdfpt+pi'], D_full=D + Pi, apply_asr_simple=True)

info('Plot electrons')

e = elphmod.dispersion.dispersion(el.H, q_path)
e -= mu

if comm.rank == 0:
    for n in range(el.size):
        plt.plot(x, e[:, n], 'b')
    plt.ylabel('Electron energy (eV)')
    plt.xlabel('Wave vector')
    plt.xticks(x[corners], path)
    plt.savefig('phrenorm_3d_1.png')
    plt.show()

info('Plot cDFPT, DFPT and renormalized phonons')

for method, label, style in [('dfpt', 'DFPT', 'r'), ('cdfpt', 'cDFPT', 'g'),
        ('cdfpt+pi', r'cDFPT+$\Pi$', 'b:')]:
    w2 = elphmod.dispersion.dispersion(ph[method].D, q_path)
    w = elphmod.ph.sgnsqrt(w2) * elphmod.misc.Ry * 1e3  # in meV

    if comm.rank == 0:
        for nu in range(ph[method].size):
            plt.plot(x, w[:, nu], style, label=None if nu else label)

if comm.rank == 0:
    plt.ylabel('Phonon energy (meV)')
    plt.xlabel('Wave vector')
    plt.xticks(x[corners], path)
    plt.legend()
    plt.savefig('phrenorm_3d_2.png')
    plt.show()
| Python |
2D | janberges/elphmod | examples/phrenorm_3d/run.sh | .sh | 1,017 | 41 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

# Driver script for the 3D phonon-renormalization example (polonium).

eval `elphmodenv`

echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'

url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard

# Download the pseudopotential unless already present:
pp=Po.upf
test -e $pp || (wget $url/$pp.gz && gunzip $pp)

nk=2

# SCF run, band structure, and orbital projections:
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun projwfc.x -nk $nk < projwfc.in | tee projwfc.out
mpirun pw.x -nk $nk < scf.in | tee scf.out

# DFPT and cDFPT phonon calculations and conversion for EPW:
for method in dfpt cdfpt
do
    mpirun ph.x -nk $nk < $method.in | tee $method.out
    fildyn=$method.dyn dvscf_dir=$method.save ph2epw
done

mpirun pw.x -nk $nk < nscf.in | tee nscf.out

# EPW runs for both methods:
for method in dfpt cdfpt
do
    mpirun -n $nk epw.x -nk $nk < epw-$method.in | tee epw-$method.out
    mv work/polonium.epmatwp $method.epmatwp
done

# Renormalize phonons and plot the results:
mpirun python3 phrenorm_3d.py
| Shell |
2D | janberges/elphmod | examples/phrenorm_graphene/phrenorm_graphene.py | .py | 2,521 | 106 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

"""Renormalize cDFPT phonons of graphene with the phonon self-energy."""

import copy
import elphmod
import matplotlib.pyplot as plt
import numpy as np

comm = elphmod.MPI.comm
info = elphmod.MPI.info

# Read calculation parameters from the Quantum ESPRESSO input files:
PW = elphmod.bravais.read_pwi('scf.in')
PH = elphmod.bravais.read_ph('dfpt.in')

kT = PW['degauss'] * elphmod.misc.Ry  # smearing width in eV
f = elphmod.occupations.smearing(**PW)  # smearing (step) function

nk = PW['k_points'][0]  # k mesh of the SCF calculation
nq = PH['nq1']  # q mesh of the phonon calculation

info('Prepare wave vectors')

# Irreducible q points of the phonon mesh (in radians):
q = sorted(elphmod.bravais.irreducibles(nq))
q = 2 * np.pi * np.array(q, dtype=float) / nq

path = 'GMKG'
q_path, x, corners = elphmod.bravais.path(path, N=500, **PW)

info('Prepare electrons')

el = elphmod.el.Model('graphene')
mu = elphmod.el.read_Fermi_level('scf.out')

e, U, order = elphmod.dispersion.dispersion_full_nosym(el.H, nk,
    vectors=True, order=True)
e -= mu  # measure energies from the Fermi level

info('Prepare phonons')

ph = dict()
for method in 'cdfpt', 'dfpt':
    ph[method] = elphmod.ph.Model('%s.dyn' % method, lr=False)

info('Prepare electron-phonon coupling')

g = dict()
for method in sorted(ph):
    elph = elphmod.elph.Model('%s.epmatwp' % method, 'wigner.fmt',
        el, ph[method])
    g[method] = elph.sample(q, U=U) * elphmod.misc.Ry ** 1.5

info('Calculate phonon self-energy')

# Self-energy with one cDFPT and one DFPT coupling vertex (in Ry^2):
Pi = elphmod.diagrams.phonon_self_energy(q, e, g=g['cdfpt'], G=g['dfpt'],
    kT=kT, occupations=f) / elphmod.misc.Ry ** 2

info('Renormalize phonons')

D = elphmod.dispersion.sample(ph['cdfpt'].D, q)

# Add the self-energy to the cDFPT dynamical matrix and transform the result
# back to real space, reusing the DFPT model as a template:
ph['cdfpt+pi'] = copy.copy(ph['dfpt'])
elphmod.ph.q2r(ph['cdfpt+pi'], D + Pi, q, nq)

info('Plot electrons')

e = elphmod.dispersion.dispersion(el.H, q_path)
e -= mu

if comm.rank == 0:
    for n in range(el.size):
        plt.plot(x, e[:, n], 'b')
    plt.ylabel('Electron energy (eV)')
    plt.xlabel('Wave vector')
    plt.xticks(x[corners], path)
    plt.savefig('phrenorm_graphene_1.png')
    plt.show()

info('Plot cDFPT, DFPT and renormalized phonons')

for method, label, style in [('dfpt', 'DFPT', 'r'), ('cdfpt', 'cDFPT', 'g'),
        ('cdfpt+pi', r'cDFPT+$\Pi$', 'b:')]:
    w2 = elphmod.dispersion.dispersion(ph[method].D, q_path)
    w = elphmod.ph.sgnsqrt(w2) * elphmod.misc.Ry * 1e3  # in meV

    if comm.rank == 0:
        for nu in range(ph[method].size):
            plt.plot(x, w[:, nu], style, label=None if nu else label)

if comm.rank == 0:
    plt.ylabel('Phonon energy (meV)')
    plt.xlabel('Wave vector')
    plt.xticks(x[corners], path)
    plt.legend()
    plt.savefig('phrenorm_graphene_2.png')
    plt.show()
| Python |
2D | janberges/elphmod | examples/phrenorm_graphene/run.sh | .sh | 1,022 | 41 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

# Driver script for the graphene phonon-renormalization example.

eval `elphmodenv`

echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'

url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard

# Download the pseudopotential unless already present:
pp=C.upf
test -e $pp || (wget $url/$pp.gz && gunzip $pp)

nk=2

# SCF run, band structure, and orbital projections:
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun projwfc.x -nk $nk < projwfc.in | tee projwfc.out
mpirun pw.x -nk $nk < scf.in | tee scf.out

# DFPT and cDFPT phonon calculations and conversion for EPW:
for method in dfpt cdfpt
do
    mpirun ph.x -nk $nk < $method.in | tee $method.out
    fildyn=$method.dyn dvscf_dir=$method.save ph2epw
done

mpirun pw.x -nk $nk < nscf.in | tee nscf.out

# EPW runs for both methods:
for method in dfpt cdfpt
do
    mpirun -n $nk epw.x -nk $nk < epw-$method.in | tee epw-$method.out
    mv work/graphene.epmatwp $method.epmatwp
done

# Renormalize phonons and plot the results:
mpirun python3 phrenorm_graphene.py
| Shell |
2D | janberges/elphmod | examples/projwfc_1d/projwfc_1d.py | .py | 1,369 | 53 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

"""Plot fat bands from projwfc.x together with Wannier90 bands (1D system)."""

import elphmod
import matplotlib.pyplot as plt

colors = ['red', 'blue', 'black', 'gray']
labels = ['$s$', '$p_{x, y}$', '$p_z$', 'other']

# Band energies and atomic projections along the band path:
x, k, eps, proj = elphmod.el.read_atomic_projections(
    'work/C.save/atomic_proj.xml', order=True)

eps *= elphmod.misc.Ry  # convert from Ry to eV

orbitals = elphmod.el.read_projwfc_out('projwfc.out')

# Half widths of the fat bands for each orbital group:
width = 0.5 * elphmod.el.proj_sum(proj, orbitals, 's', 'p{x, y}', 'pz',
    other=True)

pwi = elphmod.bravais.read_pwi('scf.in')
mu = elphmod.el.read_Fermi_level('scf.out')

path = 'GZ'
K, X, corners = elphmod.bravais.path(path, N=100, **pwi)
X *= x[-1] / X[-1]  # match the length of the projwfc.x path

# Wannier90 bands for comparison:
el = elphmod.el.Model('C')
E, order = elphmod.dispersion.dispersion(el.H, K, order=True)
E -= mu

# Plotting is done on the root MPI rank only:
if elphmod.MPI.comm.rank != 0:
    raise SystemExit

plt.ylabel('Electron energy (eV)')
plt.xlabel('Wave vector')
plt.xticks(X[corners], path)

for n in range(eps.shape[1]):
    fatbands = elphmod.plot.compline(x, eps[:, n], width[:, n, :])

    for fatband, color, label in zip(fatbands, colors, labels):
        plt.fill(*fatband, color=color, linewidth=0.0,
            label=None if n else label)

for n in range(el.size):
    plt.plot(X, E[:, n], 'y:', label=None if n else 'W90')

plt.legend()
plt.savefig('projwfc_1d.png')
plt.show()
| Python |
2D | janberges/elphmod | examples/projwfc_1d/run.sh | .sh | 786 | 28 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

# Driver script for the 1D fat-bands example (carbon chain).

eval `elphmodenv`

echo 'Using normconserving pseudopotentials from PseudoDojo'
echo '[1] van Setten et al., Comput. Phys. Commun. 226, 39 (2018)'
echo '[2] Hamann, Phys. Rev. B 88, 085117 (2013)'

url=http://www.pseudo-dojo.org/pseudos/nc-sr-04_pbe_standard

# Download the pseudopotential unless already present:
pp=C.upf
test -e $pp || (wget $url/$pp.gz && gunzip $pp)

nk=2

# SCF run, band structure, and orbital projections:
mpirun pw.x -nk $nk < scf.in | tee scf.out
mpirun pw.x -nk $nk < bands.in | tee bands.out
mpirun projwfc.x -nk $nk < projwfc.in | tee projwfc.out

# Wannierization with Wannier90:
mpirun pw.x -nk $nk < nscf.in | tee nscf.out
mpirun -n 1 wannier90.x -pp C
mpirun pw2wannier90.x < pw2w90.in | tee pw2w90.out
mpirun -n 1 wannier90.x C

# Plot fat bands together with the Wannier90 bands:
mpirun python3 projwfc_1d.py
| Shell |
2D | janberges/elphmod | examples/md/run.sh | .sh | 189 | 9 | #!/bin/bash
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

# Make the i-PI driver module importable as "ipi_driver" from md.py:
cp `which i-pi-driver-py` ipi_driver.py

python3 md.py
| Shell |
2D | janberges/elphmod | examples/md/md.py | .py | 933 | 37 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.

"""Model molecular dynamics of a TMDC driven through the i-PI interface."""

import elphmod.models.tas2
import subprocess
import time

# run.sh copies the i-PI driver script to ipi_driver.py; fall back to the
# module shipped inside the ipi package otherwise:
try:
    import ipi_driver
except ModuleNotFoundError:
    import ipi._driver.driver as ipi_driver

el, ph, elph = elphmod.models.tas2.create(rydberg=True, divide_mass=False)

driver = elphmod.md.Driver(elph, nk=(12, 12), nq=(2, 2), supercell=(9, 9),
    kT=0.02, f=elphmod.occupations.marzari_vanderbilt, n=1.0)

# Lower the electronic temperature and change the smearing after setup:
driver.kT = 0.005
driver.f = elphmod.occupations.fermi_dirac

# Start from small random displacements and write the initial structure:
driver.random_displacements(amplitude=0.05, reproducible=True)

driver.to_xyz('init.xyz')

# Launch i-PI in the background and connect this driver to its socket:
subprocess.Popen(['i-pi', 'input.xml'])
time.sleep(2) # wait for i-PI to load and create a socket

driver.plot(interactive=True)
ipi_driver.run_driver(unix=True, address='localhost', driver=driver)
driver.plot(interactive=False)
driver.plot(filename='md.png')
| Python |
2D | janberges/elphmod | tests/test_misc.py | .py | 490 | 18 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import unittest
class TestMisc(unittest.TestCase):
    """Tests for miscellaneous helper routines."""

    def test_split(self):
        """Test factorizing expression with separators and brackets."""
        expanded = list(elphmod.misc.split('d{z2, {x,y}z, x2-y2, xy}'))

        self.assertEqual(expanded,
            ['dz2', 'dxz', 'dyz', 'dx2-y2', 'dxy'])


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_models.py | .py | 1,501 | 53 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod.models.chain
import elphmod.models.graphene
import elphmod.models.tas2
import numpy as np
import unittest
elphmod.misc.verbosity = 0  # suppress status output during tests


class TestModels(unittest.TestCase):
    """Tests for the example model Hamiltonians."""

    def test_chain(self):
        """Check if chain hopping decreases with increasing bond length."""
        el, ph, elph = elphmod.models.chain.create(rydberg=True,
            divide_mass=False)

        R = (1, 0, 0)

        # Coupling and hopping must have opposite signs:
        self.assertTrue(np.all(elph.gR(*R, *R)[0] / el.t(*R) < 0))

    def test_graphene(self):
        """Check if graphene hopping decreases with increasing bond length."""
        el, ph, elph, elel = elphmod.models.graphene.create(rydberg=True,
            divide_mass=False)

        R = (0, 1, 0)

        # Expected coupling-to-hopping ratio from the model parameters:
        ratio = -elphmod.models.graphene.beta / elphmod.models.graphene.tau

        self.assertTrue(ratio < 0)
        self.assertTrue(np.allclose(elph.gR(*R, *R)[1], el.t(*R) * ratio))

    def test_tas2(self):
        """Check if TMDC hopping decreases with increasing bond length."""
        el, ph, elph = elphmod.models.tas2.create(rydberg=True,
            divide_mass=False)

        R = (1, 0, 0)

        # Expected coupling-to-hopping ratio from the model parameters:
        ratio = -elphmod.models.tas2.beta / np.linalg.norm(elph.ph.a[0])

        self.assertTrue(ratio < 0)
        self.assertTrue(np.allclose(elph.gR(*R, *R)[0], el.t(*R) * ratio))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_el.py | .py | 2,086 | 65 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import copy
import elphmod.models.graphene
import numpy as np
import unittest
elphmod.misc.verbosity = 0  # suppress status output during tests


class TestElectron(unittest.TestCase):
    """Tests for the electron module."""

    def test_electron_cell_transforms(self, nk=4, N=2):
        """Verify that cell transformations leave electron energies untouched."""
        el, ph, elph, elel = elphmod.models.graphene.create(rydberg=True,
            divide_mass=False)

        # Reference: sorted band energies on the full k mesh of the unit cell:
        ref = np.sort(elphmod.dispersion.dispersion_full(el.H, nk), axis=None)

        # Supercell mapping (with correspondingly coarser k mesh):
        el = el.supercell(N, N)
        e = elphmod.dispersion.dispersion_full(el.H, nk // N)
        self.assertTrue(np.allclose(np.sort(e, axis=None), ref))

        # Back-transformation to the unit cell:
        el = el.unit_cell()
        e = elphmod.dispersion.dispersion_full(el.H, nk)
        self.assertTrue(np.allclose(np.sort(e, axis=None), ref))

        # Shifting an orbital by a lattice vector:
        el.shift_orbitals(0, (-1, 0, 0))
        e = elphmod.dispersion.dispersion_full(el.H, nk)
        self.assertTrue(np.allclose(np.sort(e, axis=None), ref))

        # Reordering the orbitals:
        el.order_orbitals(1, 0)
        e = elphmod.dispersion.dispersion_full(el.H, nk)
        self.assertTrue(np.allclose(np.sort(e, axis=None), ref))

    def test_k2r(self):
        """Test Fourier interpolation of Hamiltonian."""
        for rydberg in False, True:
            el, ph, elph, elel = elphmod.models.graphene.create(rydberg=rydberg,
                divide_mass=False)

            el2 = copy.copy(el)

            # Sample the Hamiltonian in k space and transform it back to real
            # space; the result must reproduce the original model data:
            H = elphmod.dispersion.sample(el.H, elphmod.models.graphene.k)
            elphmod.el.k2r(el2, H, ph.a, ph.r, rydberg=rydberg)
            el2.standardize(eps=1e-10)
            self.assertTrue(np.allclose(el.data, el2.data))

            # Repeat with the opposite choice of energy units:
            if rydberg:
                H *= elphmod.misc.Ry
            else:
                H /= elphmod.misc.Ry

            elphmod.el.k2r(el2, H, ph.a, ph.r, rydberg=not rydberg)
            el2.standardize(eps=1e-10)
            self.assertTrue(np.allclose(el.data, el2.data))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_ph.py | .py | 3,493 | 109 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import copy
import elphmod.models.graphene
import numpy as np
import unittest
elphmod.misc.verbosity = 0  # suppress status output during tests


class TestPhonon(unittest.TestCase):
    """Tests for the phonon module."""

    def test_phonon_cell_transforms(self, nq=4, N=2):
        """Verify that cell transformations leave phonon energies untouched."""
        el, ph, elph, elel = elphmod.models.graphene.create(rydberg=True,
            divide_mass=False)

        # Reference: sorted squared frequencies on the full q mesh:
        ref = np.sort(elphmod.dispersion.dispersion_full(ph.D, nq), axis=None)

        # Supercell mapping (with correspondingly coarser q mesh):
        ph = ph.supercell(N, N)
        w2 = elphmod.dispersion.dispersion_full(ph.D, nq // N)
        self.assertTrue(np.allclose(np.sort(w2, axis=None), ref))

        # Back-transformation to the unit cell:
        ph = ph.unit_cell()
        w2 = elphmod.dispersion.dispersion_full(ph.D, nq)
        self.assertTrue(np.allclose(np.sort(w2, axis=None), ref))

        # Shifting an atom by a lattice vector:
        ph.shift_atoms(0, (-1, 0, 0))
        w2 = elphmod.dispersion.dispersion_full(ph.D, nq)
        self.assertTrue(np.allclose(np.sort(w2, axis=None), ref))

        # Reordering the atoms:
        ph.order_atoms(1, 0)
        w2 = elphmod.dispersion.dispersion_full(ph.D, nq)
        self.assertTrue(np.allclose(np.sort(w2, axis=None), ref))

    def test_q2r(self):
        """Test Fourier interpolation of dynamical matrix."""
        for divide_mass in False, True:
            el, ph, elph, elel = elphmod.models.graphene.create(
                divide_mass=divide_mass)

            ph2 = copy.copy(ph)

            # Check both the full q mesh and the irreducible wedge:
            for irr in False, True:
                if irr:
                    nq = elphmod.models.graphene.nq[0]
                    q = np.array(sorted(elphmod.bravais.irreducibles(nq)),
                        dtype=float) * 2 * np.pi / nq
                else:
                    q = elphmod.models.graphene.q

                D = elphmod.dispersion.sample(ph.D, q)

                if irr:
                    elphmod.ph.q2r(ph2, D, q, nq, divide_mass=divide_mass)
                else:
                    elphmod.ph.q2r(ph2, D_full=D, divide_mass=divide_mass)

                ph2.standardize(eps=1e-10)

                self.assertTrue(np.allclose(ph.data, ph2.data))

    def test_supercell_phrenorm(self, nq=2, nk=2, N=2,
            kT=0.01, f=elphmod.occupations.fermi_dirac):
        """Verify that phonon renormalization and supercell mapping commute."""
        el, ph, elph, elel = elphmod.models.graphene.create(rydberg=True)

        mu = elphmod.models.graphene.t  # Fermi level of the model

        data = []

        # First pass: renormalize in the unit cell, then map to the supercell.
        # Second pass: renormalize directly in the supercell.
        for step in range(2):
            q = elphmod.bravais.mesh(nq, nq, flat=True)

            e, U = elphmod.dispersion.dispersion_full_nosym(elph.el.H, nk,
                vectors=True)
            e -= mu

            D = elphmod.dispersion.sample(elph.ph.D, q)

            g = elph.sample(q, U=U)

            D += elphmod.diagrams.phonon_self_energy(q, e, g=g,
                kT=kT, occupations=f)

            # Correction at q = 0 from the shift of the Fermi level:
            D[0] += elphmod.diagrams.phonon_self_energy_fermi_shift(e, g[0],
                kT=kT, occupations=f)

            phrenorm = copy.copy(elph.ph)

            elphmod.ph.q2r(phrenorm, D_full=D, nq=(nq, nq))

            if not step:
                phrenorm = phrenorm.supercell(N, N)
                elph = elph.supercell(N, N)
                nq //= N
                nk //= N

            phrenorm.standardize()

            data.append(phrenorm.data)

        self.assertTrue(np.allclose(data[0], data[1]))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_diagrams.py | .py | 5,501 | 161 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod.models.tas2
import numpy as np
import unittest
elphmod.misc.verbosity = 0  # suppress status output during tests

tol = dict(rtol=1e-2, atol=0.0)  # tolerance for finite-difference comparisons


class TestDiagrams(unittest.TestCase):
    """Tests for the diagrams (perturbation-theory) module."""

    def _test_expansion(self, eps=1e-4, nk=(4, 4),
            kT=0.01, f=elphmod.occupations.fermi_dirac, n=None):
        """Compare lowest-order diagrams to finite differences."""
        k = elphmod.bravais.mesh(*nk)
        q = np.zeros((1, 2))

        el, ph, elph = elphmod.models.tas2.create(rydberg=True,
            divide_mass=False)

        g0 = elph.gR()
        g0[2, 0, 0] += elph.data.max() # ensure nonzero first-order term

        elph = elph.supercell(2, 2)

        H = elphmod.dispersion.sample(elph.el.H, k)
        e, U = np.linalg.eigh(H)

        # With fixed electron number, measure energies from the Fermi level:
        if n:
            e -= elphmod.occupations.find_Fermi_level(n, e, kT, f)

        # Diagonal Hamiltonian in the band basis:
        H = e[..., np.newaxis] * np.eye(elph.el.size)

        # Random but reproducible (broadcast) displacement pattern:
        u = 1 - 2 * elphmod.misc.rand(1, elph.ph.size, 1)
        elphmod.MPI.comm.Bcast(u)

        # Coupling projected onto the displacement pattern:
        gu = elph.sample(q=q, U=U, u=u)[0]

        # NOTE(review): prefactor appears unused below — confirm intended.
        prefactor = 2 * kT / np.prod(nk)

        def E(H):
            """Compute grand potential or free energy for given Hamiltonian."""
            e = np.linalg.eigvalsh(H)
            if n:
                mu = elphmod.occupations.find_Fermi_level(n, e, kT, f)
                e -= mu
            Phi = elphmod.diagrams.grand_potential(e, kT, f)
            if n:
                return Phi + mu * n
            return Phi

        # First derivative vs. first-order diagram:
        diff = (E(H + eps * gu) - E(H - eps * gu)) / (2 * eps)
        pert = elphmod.diagrams.first_order(e, gu,
            kT=kT, occupations=f)[0].real
        self.assertTrue(np.allclose(diff, pert, **tol))

        # Second derivative vs. phonon self-energy (bubble):
        diff = (E(H + eps * gu) - 2 * E(H) + E(H - eps * gu)) / eps ** 2
        pert = elphmod.diagrams.phonon_self_energy(q, e, g2=abs(gu) ** 2,
            kT=kT, occupations=f)[0, 0].real
        if n:
            # Correction from the displacement-induced Fermi-level shift:
            pert += elphmod.diagrams.phonon_self_energy_fermi_shift(e, gu,
                kT=kT, occupations=f)[0, 0].real
        self.assertTrue(np.allclose(diff, pert, **tol))

        if n:
            return

        # Third derivative vs. triangle diagram (grand canonical only):
        diff = (E(H + 2 * eps * gu) - 2 * E(H + eps * gu) + 2 * E(H - eps * gu)
            - E(H - 2 * eps * gu)) / (2 * eps ** 3)
        pert = elphmod.diagrams.triangle(q[0], q[0], e, gu, gu, gu,
            kT=kT, occupations=f).real
        self.assertTrue(np.allclose(diff, pert, **tol))

    def test_expansion_fermi_dirac(self):
        """Compare diagrams to differences for Fermi-Dirac smearing."""
        self._test_expansion(f=elphmod.occupations.fermi_dirac)

    def test_expansion_gauss(self):
        """Compare diagrams to differences for Gauss smearing."""
        self._test_expansion(f=elphmod.occupations.gauss)

    def test_expansion_marzari_vanderbilt(self):
        """Compare diagrams to differences for Marzari-Vanderbilt smearing."""
        self._test_expansion(f=elphmod.occupations.marzari_vanderbilt)

    def test_expansion_methfessel_paxton(self):
        """Compare diagrams to differences for Methfessel-Paxton smearing."""
        self._test_expansion(f=elphmod.occupations.methfessel_paxton)

    def test_expansion_double_fermi_dirac(self):
        """Compare diagrams to differences for double Fermi-Dirac smearing."""
        self._test_expansion(f=elphmod.occupations.double_fermi_dirac)

    def test_expansion_two_fermi_dirac(self):
        """Compare diagrams to differences for two Fermi levels."""
        self._test_expansion(f=elphmod.occupations.two_fermi_dirac)

    def test_free_energy_fermi_dirac(self):
        """Check free-energy derivatives for Fermi-Dirac smearing."""
        self._test_expansion(f=elphmod.occupations.fermi_dirac, n=1.0)

    def test_free_energy_gauss(self):
        """Check free-energy derivatives for Gauss smearing."""
        self._test_expansion(f=elphmod.occupations.gauss, n=1.0)

    def test_free_energy_marzari_vanderbilt(self):
        """Check free-energy derivatives for Marzari-Vanderbilt smearing."""
        self._test_expansion(f=elphmod.occupations.marzari_vanderbilt, n=1.0)

    def test_free_energy_methfessel_paxton(self):
        """Check free-energy derivatives for Methfessel-Paxton smearing."""
        self._test_expansion(f=elphmod.occupations.methfessel_paxton, n=1.0)

    def test_free_energy_double_fermi_dirac(self):
        """Check free-energy derivatives for double Fermi-Dirac smearing."""
        self._test_expansion(f=elphmod.occupations.double_fermi_dirac, n=1.0)

    def test_free_energy_two_fermi_dirac(self):
        """Check free-energy derivatives for two Fermi levels."""
        self._test_expansion(f=elphmod.occupations.two_fermi_dirac, n=1.0)

    def test_polarization(self, nk=8, nq=4,
            kT=0.01, f=elphmod.occupations.fermi_dirac):
        """Compare different implementations of RPA polarization."""
        k = elphmod.bravais.mesh(nk, nk)
        q = elphmod.bravais.mesh(nq, nq, flat=True)

        el, ph, elph = elphmod.models.tas2.create()

        e, U = elphmod.dispersion.dispersion(el.H, k, vectors=True)

        Pi1 = elphmod.dispersion.sample(elphmod.diagrams.polarization(e, U,
            kT=kT, occupations=f), q)

        Pi2 = elphmod.diagrams.phonon_self_energy(q, e, psi=U,
            kT=kT, occupations=f)

        self.assertTrue(np.allclose(Pi1, Pi2))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_dispersion.py | .py | 980 | 33 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod.models.graphene
import numpy as np
import unittest
elphmod.misc.verbosity = 0

class TestDispersion(unittest.TestCase):
    """Tests for the dispersion module."""

    def test_dispersion_full(self, nk=12):
        """Validate mapping of dispersion from irreducible wedge to full BZ."""
        el, ph, elph, elel = elphmod.models.graphene.create(rydberg=True,
            divide_mass=False)

        # Symmetry-reduced and brute-force sampling must agree:
        bands_sym = elphmod.dispersion.dispersion_full(el.H, nk)
        bands_nosym = elphmod.dispersion.dispersion_full_nosym(el.H, nk)

        self.assertTrue(np.allclose(bands_sym, bands_nosym))

        # Repeat for a sheared cell with a 120-degree lattice angle:
        el = el.supercell(1, (1, 1, 0))

        bands_sym = elphmod.dispersion.dispersion_full(el.H, nk, angle=120)
        bands_nosym = elphmod.dispersion.dispersion_full_nosym(el.H, nk)

        self.assertTrue(np.allclose(bands_sym, bands_nosym))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/run_py_versions_conda.sh | .sh | 486 | 25 | #!/bin/bash
# conda install conda-build
# source run_py_versions_conda.sh

# Run the test suite against a range of Python versions in throwaway conda
# environments and collect the output in a log file.

env=/dev/shm/env
log=run_py_versions_conda.log

echo "Tests for different Python versions" > $log

for minor in `seq 5 14`
do
    # Create a fresh environment with the requested Python version:
    conda create -y -p $env python=3.$minor
    conda activate $env
    conda install -y numpy scipy
    conda develop ..
    echo "Tests for Python 3.$minor" | tee -a $log
    python3 -m unittest -vfc 2>&1 | tee -a $log
    # Tear the environment down again:
    conda deactivate
    conda remove -y -p $env --all
done
| Shell |
2D | janberges/elphmod | tests/test_md.py | .py | 1,993 | 63 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod.models.graphene
import elphmod.models.tas2
import numpy as np
import unittest
elphmod.misc.verbosity = 0  # suppress status output during tests


class TestMD(unittest.TestCase):
    """Tests for the molecular-dynamics driver."""

    def test_dense_vs_sparse(self,
            N=2, kT=0.1, f=elphmod.occupations.fermi_dirac):
        """Verify that dense and sparse MD drivers yield identical results."""
        el, ph, elph, elel = elphmod.models.graphene.create(rydberg=True,
            divide_mass=False)

        elph_dense = elph.supercell(N, N)
        driver_dense = elphmod.md.Driver(elph_dense, kT, f,
            n=elph_dense.el.size)

        elph_sparse = elph.supercell(N, N, sparse=True)
        driver_sparse = elphmod.md.Driver(elph_sparse, kT, f,
            n=elph_sparse.el.size)

        # Use the same (reproducible) displacements for both drivers:
        driver_dense.random_displacements(reproducible=True)
        driver_sparse.u[:] = driver_dense.u

        self.assertTrue(np.isclose(driver_dense.free_energy(show=False),
            driver_sparse.free_energy(show=False)))

        self.assertTrue(np.allclose(driver_dense.jacobian(show=False),
            driver_sparse.jacobian(show=False)))

    def test_superconductivity(self,
            N=2, kT=0.1, f=elphmod.occupations.fermi_dirac):
        """Verify that superconductivity calculations are size-consistent."""
        el, ph, elph = elphmod.models.tas2.create(rydberg=True,
            divide_mass=False)

        # Unit-cell driver with N x N meshes vs. N x N supercell driver:
        driver_dense = elphmod.md.Driver(elph, kT, f, nk=(N, N), nq=(N, N),
            n=1.0)

        elph_sparse = elph.supercell(N, N, sparse=True)
        driver_sparse = elphmod.md.Driver(elph_sparse, kT, f,
            n=len(elph_sparse.cells))

        driver_dense.diagonalize()
        driver_sparse.diagonalize()

        self.assertTrue(np.allclose(driver_dense.superconductivity(),
            driver_sparse.superconductivity()))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_elph.py | .py | 977 | 34 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import copy
import elphmod.models.graphene
import numpy as np
import unittest
elphmod.misc.verbosity = 0  # suppress status output during tests


class TestElectronPhonon(unittest.TestCase):
    """Tests for the electron-phonon module."""

    def test_q2r(self):
        """Test Fourier interpolation of electron-phonon coupling."""
        for divide_mass in False, True:
            el, ph, elph, elel = elphmod.models.graphene.create(
                divide_mass=divide_mass)

            elph2 = copy.copy(elph)

            # Sample the coupling on the full q mesh and transform it back to
            # real space; the result must reproduce the original model data:
            g = elph.sample(elphmod.models.graphene.q.reshape((-1, 3)),
                elphmod.models.graphene.nk)

            elphmod.elph.q2r(elph2, elphmod.models.graphene.nq,
                elphmod.models.graphene.nk, g, ph.r, divide_mass)

            elph2.standardize(eps=1e-10)

            self.assertTrue(np.allclose(elph.data, elph2.data))


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_occupations.py | .py | 2,021 | 59 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
from elphmod import occupations
import numpy as np
import unittest
tol = dict(rtol=1e-5, atol=1e-4)  # tolerance for numerical derivatives


class TestOccupations(unittest.TestCase):
    """Tests for the occupation (smearing) functions."""

    def _test_derivatives(self, f, xmax=10.0, nx=2001):
        """Compare analytical and numerical derivatives of step functions."""
        x, dx = np.linspace(-xmax, xmax, nx, retstep=True)

        xd = x[1:] - dx / 2  # midpoints, where first differences live
        xdp = x[1:-1]  # interior points, where second differences live
        xs = x + dx / 2
        xs0 = x[0] - dx / 2

        # delta must match the negative first difference of the step function:
        self.assertTrue(np.allclose(f.delta(xd),
            -np.diff(f(x)) / dx, **tol))

        # delta_prime must match the negative second difference:
        self.assertTrue(np.allclose(f.delta_prime(xdp),
            -np.diff(f(x), 2) / dx ** 2, **tol))

        # The entropy, where defined, must match the cumulative integral of
        # -x delta(x):
        if hasattr(f, 'entropy'):
            self.assertTrue(np.allclose(f.entropy(xs) - f.entropy(xs0),
                -dx * np.cumsum(x * f.delta(x)), **tol))

    def test_derivatives_fermi_dirac(self):
        """Validate derivatives of Fermi-Dirac step function."""
        self._test_derivatives(occupations.fermi_dirac)

    def test_derivatives_gauss(self):
        """Validate derivatives of Gauss step function."""
        self._test_derivatives(occupations.gauss)

    def test_derivatives_lorentz(self):
        """Validate derivatives of Lorentz step function."""
        self._test_derivatives(occupations.lorentz)

    def test_derivatives_marzari_vanderbilt(self):
        """Validate derivatives of Marzari-Vanderbilt step function."""
        self._test_derivatives(occupations.marzari_vanderbilt)

    def test_derivatives_methfessel_paxton(self):
        """Validate derivatives of Methfessel-Paxton step function."""
        self._test_derivatives(occupations.methfessel_paxton)

    def test_derivatives_double_fermi_dirac(self):
        """Validate derivatives of double Fermi-Dirac step function."""
        self._test_derivatives(occupations.double_fermi_dirac)


if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/run.sh | .sh | 125 | 10 | #!/bin/bash
set -e

# Run the unit tests once in serial and once with two MPI processes.

echo "Serial tests"
python3 -m unittest -vfc

echo "Parallel tests"
mpirun -n 2 python3 -m unittest -fc
| Shell |
2D | janberges/elphmod | tests/test_bravais.py | .py | 2,398 | 73 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import elphmod
import numpy as np
import unittest
class TestBravais(unittest.TestCase):
    def _test_wigner_2d(self, angle=120, nk=12):
        """Verify that the 2D and the general Wigner-Seitz code agree."""
        # Embed the 2D lattice vectors into a 3D cell with one atom at
        # the origin so the general routine can be applied.
        lat = np.eye(3)
        lat[:2, :2] = elphmod.bravais.translations(angle)
        basis = np.zeros((1, 3))

        irvec, ndegen, wslen = elphmod.bravais.wigner(nk, nk, 1, lat, basis)
        perm = sorted(range(len(irvec)), key=lambda n: tuple(irvec[n]))

        irvec_2d, ndegen_2d, wslen_2d = elphmod.bravais.wigner_2d(nk, angle)
        perm_2d = sorted(range(len(irvec_2d)), key=lambda n: irvec_2d[n])

        # Compare lattice vectors, degeneracies, and distances after
        # bringing both results into the same lexicographic order.
        self.assertTrue(np.array_equal(irvec[perm, :2],
            np.array(irvec_2d)[perm_2d]))
        self.assertTrue(np.array_equal(ndegen[perm, 0, 0],
            np.array(ndegen_2d)[perm_2d]))
        self.assertTrue(np.allclose(wslen[perm, 0, 0],
            np.array(wslen_2d)[perm_2d]))

    def test_wigner_2d_60(self):
        """Verify that 2D (60 degrees) and general code yield same WS data."""
        self._test_wigner_2d(angle=60)

    def test_wigner_2d_90(self):
        """Verify that 2D (90 degrees) and general code yield same WS data."""
        self._test_wigner_2d(angle=90)

    def test_wigner_2d_120(self):
        """Verify that 2D (120 degrees) and general code yield same WS data."""
        self._test_wigner_2d(angle=120)

    def test_images(self, k1=7, k2=3, nk=12):
        """Compare different functions to obtain equivalent k points."""
        for ibrav, angle in [(4, 60), (6, 90)]:
            via_ibrav = sorted(elphmod.bravais.images_ibrav(k1, k2, 1,
                nk, nk, 1, ibrav))
            via_angle = [(p, q, 0)
                for p, q in sorted(elphmod.bravais.images(k1, k2, nk, angle))]
            self.assertTrue(via_ibrav == via_angle)

    def test_irreducibles(self, nk=12):
        """Compare different functions to obtain irreducible k points."""
        for ibrav, angle in [(4, 60), (6, 90)]:
            via_ibrav = sorted(
                elphmod.bravais.irreducibles_ibrav(nk, nk, 1, ibrav))
            via_angle = [(p, q, 0)
                for p, q in sorted(elphmod.bravais.irreducibles(nk, angle))]
            self.assertTrue(via_ibrav == via_angle)
# Run this module's tests directly (e.g. `python3 test_bravais.py`).
if __name__ == '__main__':
    unittest.main()
| Python |
2D | janberges/elphmod | tests/test_elel.py | .py | 762 | 30 | #!/usr/bin/env python3
# Copyright (C) 2017-2026 elphmod Developers
# This program is free software under the terms of the GNU GPLv3 or later.
import copy
import elphmod.models.graphene
import numpy as np
import unittest
elphmod.misc.verbosity = 0
class TestElectronElectron(unittest.TestCase):
    def test_q2r(self):
        """Round-trip test of the electron-electron interaction: sampling
        W on the model's q mesh and Fourier-transforming back must
        reproduce the original real-space data."""
        el, ph, elph, elel = elphmod.models.graphene.create()

        clone = copy.copy(elel)
        sampled = elphmod.dispersion.sample(elel.W, elphmod.models.graphene.q)
        elphmod.elel.q2r(clone, sampled, ph.a, ph.r)
        clone.standardize(eps=1e-10)

        self.assertTrue(np.allclose(elel.data, clone.data))
# Run this module's tests directly (e.g. `python3 test_elel.py`).
if __name__ == '__main__':
    unittest.main()
| Python |
3D | antecede/EZSpecificity | main_specificity_ss.py | .py | 4,062 | 128 | import sys
root_dir = "/projects/bbto/suyufeng/enzyme_specificity_public"
log_root_dir = "/scratch/bbto/suyufeng/data/logs"
# Make the project sources importable before the project-local imports below.
sys.path.append(f"{root_dir}/src")
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import pytorch_lightning as pl
from Datasets.brenda import Singledataset
import torch, shutil, os, sys
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
import torch.multiprocessing
from rdkit import RDLogger
import warnings
from sklearn import metrics
import numpy as np
from absl import flags
from glob import glob
from Models.utils import load_model
from utils import get_new_log_dir, load_config, get_logger
# Silence RDKit chatter and avoid "too many open files" with many workers.
RDLogger.DisableLog('rdApp.*')
torch.multiprocessing.set_sharing_strategy('file_descriptor')
warnings.filterwarnings("ignore")
# Command-line flag: path to the YAML training configuration.
flags.DEFINE_string('config_path', f"{root_dir}/src/Configs/two_tasks_ss.yml", 'Config path.')
FLAGS = flags.FLAGS
FLAGS(sys.argv)
config_dir = FLAGS.config_path
config = load_config(config_dir)
# CPUs are allocated per job; divide by GPU count to get workers per process.
config.num_cpus = config.num_cpus // config.num_gpus
config.device = torch.device("cuda:0")
log = config.log
# Fix random seeds for reproducibility.
np.random.seed(config.seed)
torch.manual_seed(config.seed)
# Accumulators for the logits/labels ensembled over all runs.
final_results = None
final_label = None
if log:
    # One top-level log directory for the whole experiment; per-run
    # subdirectories are created inside the training loop below.
    tag = 'sp_' + str(config.data.tag)
    main_log_dir = get_new_log_dir(root=f"{log_root_dir}/logs", tag=tag)
    main_logger = get_logger("sample", main_log_dir)
# Train config.n_runs independent models; their test logits are summed into
# final_results to form an ensemble prediction.
for i in range(config.n_runs):
    # Per-run logging artifacts exist only when config.log is enabled, so
    # every later use of logger/writer/checkpoint_callback must be guarded.
    if log:
        log_dir = f"{main_log_dir}/run_{i}"
        os.makedirs(log_dir, exist_ok=True)
        logger = get_logger("sample", log_dir)
        logger.info(config)
        writer = TensorBoardLogger(log_dir)
        # Keep a copy of the config next to the run artifacts.
        shutil.copyfile(config_dir, os.path.join(log_dir, os.path.basename(config_dir)))
        os.makedirs(f"{log_dir}/models", exist_ok=True)
        # Keep the five checkpoints with the best validation AUPR.
        checkpoint_callback = ModelCheckpoint(
            monitor='aupr/val',
            dirpath=f"{log_dir}/models",
            filename='best-checkpoint',
            save_top_k=5,
            mode='max'
        )
    dm = Singledataset(config)
    model = load_model(config=config)
    early_stop_callback = EarlyStopping(monitor="aupr/val", min_delta=0.00, patience=60, verbose=False, mode="max")
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer_paras = {
        'gpus': config.num_gpus,
        'num_nodes': 1,
        'callbacks': [early_stop_callback, lr_monitor],
        'gradient_clip_val': config.training.gradient_clip_val,
        'reload_dataloaders_every_n_epochs': False,
        'accumulate_grad_batches': config.training.accumulate_grad_batches,
        'accelerator': 'gpu',
        'strategy': "ddp"
    }
    if log:
        trainer_paras['callbacks'].append(checkpoint_callback)
        trainer_paras['logger'] = writer
    # Optional resume: config.resume_checkpoint may be absent from the YAML.
    # Fix: catch only the expected AttributeError (missing EasyDict key)
    # instead of a bare `except:` that silently swallowed everything.
    try:
        trainer_paras['resume_from_checkpoint'] = config.resume_checkpoint
    except AttributeError:
        pass
    # Optional fine-tuning from a pretrained checkpoint; same convention.
    try:
        checkpoint = torch.load(config.finetune_checkpoint)
        model.load_state_dict(checkpoint["state_dict"])
    except AttributeError:
        pass
    trainer = pl.Trainer(**trainer_paras)
    trainer.fit(model, datamodule=dm)
    # NOTE(review): ckpt_path='best' requires checkpointing, which is only
    # configured when config.log is set — confirm log is True for testing.
    trainer.test(model=model, ckpt_path='best', datamodule=dm)
    logits = model.logits
    label = model.labels
    fpr, tpr, thresholds = metrics.roc_curve(label.ravel(), logits.ravel(), pos_label=1)
    # Fix: these loggers are created only when config.log is set; the
    # unguarded calls raised NameError when logging was disabled.
    if log:
        logger.info(f"auroc: {metrics.auc(fpr, tpr)}")
        logger.info(f"aupr: {metrics.average_precision_score(label.ravel(), logits.ravel())}")
        main_logger.info(f"auroc: {metrics.auc(fpr, tpr)}")
        main_logger.info(f"aupr: {metrics.average_precision_score(label.ravel(), logits.ravel())}")
    # Ensemble accumulation: sum logits over runs; labels are assumed
    # identical across runs, so keeping the last set suffices.
    if final_results is None:
        final_results = logits
    else:
        final_results += logits
    final_label = label
# Final ensemble metrics: final_results holds logits summed over all runs.
# AUROC and average precision are rank-based and thus invariant under a
# positive scaling, so the sum behaves like the mean here.
logits = final_results.ravel()
label = final_label.ravel()
fpr, tpr, thresholds = metrics.roc_curve(label, logits, pos_label=1)
# Fix: `logger` exists only when config.log is set (it is the logger of the
# last run); the unguarded calls raised NameError when logging was disabled.
if log:
    logger.info(f"auroc: {metrics.auc(fpr, tpr)}")
    logger.info(f"aupr: {metrics.average_precision_score(label, logits)}")
3D | antecede/EZSpecificity | example.ipynb | .ipynb | 26,341 | 667 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Prepared Data\n",
"\n",
"The data directory should contain four files / directory:\n",
"1. substrates files: \"substrates.csv\". It must contains the column \"Substrate_SMILES\" which are the smile strings for substrate vocabulary\n",
"2. enzymes files: \"enzymes.csv\". It must contains the column \"Protein sequence\" which are the pdb sequence for enzyme vocabulary\n",
"3. metadata: \"data.csv\". It must contains \"Dock Index\" d, \"Enzyme Index\" e, \"Substrate Index\"s, which means the dth docking is the complex structure of eth enzyme and sth enzyme. The indexing system for enzymes and substrates should align with the vocabulary files mentioned in 1. and 2.\n",
"4. docking structure folder: Structure. It contains several docking files \"i.pdb\" which are the docking result for pair with index i."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"features_output_dir = \"data/small_family/halogenase\"\n",
"prepared_data_dir = \"data/small_family/halogenase\"\n",
"enzyme_csv_path = f\"{prepared_data_dir}/Enzymes.csv\"\n",
"substrate_csv_path = f\"{prepared_data_dir}/Substrates.csv\"\n",
"metadata_csv_path = f\"{prepared_data_dir}/data.csv\"\n",
"structure_dir = f\"{prepared_data_dir}/Structure\"\n",
"src_root_dir = \"/work/hdd/bbsm/suyufeng/enzyme_specificity\"\n",
"data_root_dir = \"/work/hdd/bbsm/suyufeng/enzyme_specificity\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generate enzyme features"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from Datasets.create_features import generate_esm_embedding, convert_protein_sequence_to_number\n",
"import lmdb\n",
"import pandas as pd\n",
"\n",
"df = pd.read_csv(enzyme_csv_path, sep=\",\")\n",
"env = lmdb.open(\n",
" f\"{features_output_dir}/enzyme_features.lmdb\",\n",
" map_size=600*(1024*1024*1024), # 600GB\n",
" create=True,\n",
" subdir=False,\n",
" readonly=False, # Writable\n",
")\n",
"data = []\n",
"sequences = []\n",
"ids = []\n",
"uniprot_dict = {}\n",
"for index, sequence in enumerate(df[\"Protein sequence\"]):\n",
" if sequence in uniprot_dict:\n",
" print(f\"{index}th protein already in lmdb\")\n",
" if len(sequence) > 1000:\n",
" print(f\"{index}th protein sequence too long\")\n",
" continue\n",
" try:\n",
" convert_protein_sequence_to_number(sequence)\n",
" except:\n",
" print(f\"{index}th proteinsequence contain non-standard amino acid\")\n",
" continue\n",
" sequences.append(sequence)\n",
" ids.append(str(index))\n",
" uniprot_dict[sequence] = (len(uniprot_dict), 1)\n",
" data.append((str(index), sequence))\n",
"generate_esm_embedding(env, data, uniprot_dict, device=\"cpu\")\n",
"env.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generate substrate features\n",
"1. Generate graph features\n",
"2. Morgan embedding\n",
"3. Grover embedding"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Generate graph features"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from multiprocessing.pool import Pool\n",
"from Datasets.create_features import get_reaction_feature_single\n",
"import pandas as pd\n",
"import lmdb\n",
"import pickle\n",
"\n",
"reactions = []\n",
"substrates = []\n",
"\n",
"df = pd.read_csv(substrate_csv_path, sep=',').dropna(subset=['Substrate_SMILES'])\n",
"parameters_dict = {}\n",
"for substrate in df['Substrate_SMILES']:\n",
" left = substrate\n",
" right = substrate\n",
" if substrate not in parameters_dict and len(substrate) < 275:\n",
" parameters_dict[substrate] = (len(parameters_dict) - 1, (f\"{substrate}>>{substrate}\", substrate, right, True))\n",
"\n",
"parameters = [parameter[1] for parameter in parameters_dict.values()]\n",
"db = lmdb.open(\n",
" f\"{features_output_dir}/reaction_features.lmdb\",\n",
" map_size=600*(1024*1024*1024), # 600GB\n",
" create=True,\n",
" subdir=False,\n",
" readonly=False, # Writable\n",
")\n",
"# with Pool(60) as pool:\n",
"results = [get_reaction_feature_single(parameter) for parameter in parameters]\n",
"for full_reaction, substrate, tag, data in results:\n",
" if (data is not None) or (tag is None):\n",
" with db.begin(write=True, buffers=True) as txn:\n",
" txn.put(\n",
" key = str(len(reactions)).encode(),\n",
" value = pickle.dumps(data)\n",
" )\n",
" reactions.append(full_reaction)\n",
" substrates.append(substrate)\n",
" else:\n",
" print('Skipped %s %s' % (full_reaction, tag))\n",
"db.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Morgan embedding"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from rdkit.Chem import AllChem, DataStructs\n",
"import numpy as np\n",
"\n",
"df = pd.read_csv(substrate_csv_path, sep=',').dropna(subset=['Substrate_SMILES'])\n",
"results = []\n",
"for substrate in df['Substrate_SMILES']:\n",
" mol = AllChem.MolFromSmiles(substrate)\n",
" fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, 1024)\n",
" arr = np.zeros((0,), dtype=np.int8)\n",
" DataStructs.ConvertToNumpyArray(fp,arr)\n",
" results.append(arr)\n",
"np.save(f\"{features_output_dir}/morgan_fingerprint.npy\", np.array(results))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Grover embedding\n",
"\n",
"It would better to follow instructions mentioned in the Grover package. Here is just an example"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import os\n",
"\n",
"df = pd.read_csv(substrate_csv_path, sep=',').dropna(subset=['Substrate_SMILES'])\n",
"\n",
"results = [smile for smile in df['Substrate_SMILES']]\n",
"data = {\n",
" \"substrates\": results\n",
"}\n",
"data = pd.DataFrame(data)\n",
"data.to_csv(f\"{features_output_dir}/grover_substrates.csv\", index=False)\n",
"\n",
"print(f\"python {src_root_dir}/src/other_softwares/grover_software/scripts/save_features.py --data_path {features_output_dir}/grover_substrates.csv \\\n",
" --save_path {features_output_dir}/grover_substrates.npz \\\n",
" --features_generator fgtasklabel \\\n",
" --restart\")\n",
"\n",
"print(f\"mkdir {features_output_dir}/grover_vocab\")\n",
"print(f\"python {src_root_dir}/src/other_softwares/grover_software/scripts/build_vocab.py --data_path {features_output_dir}/grover_substrates.csv \\\n",
" --vocab_save_folder {features_output_dir}/grover_vocab \\\n",
" --dataset_name test\")\n",
"print(f\"python main.py fingerprint --data_path {features_output_dir}/grover_substrates.csv --features_path {features_output_dir}/grover_substrates.npz --checkpoint_path {data_root_dir}/data/pretrain_model/grover_large.pt --fingerprint_source both --output {features_output_dir}/grover_fingerprint.npz --save_lmdb_path {features_output_dir}/grover_fingerprint.lmdb --fingerprint_source both\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generate structure features\n",
"1. Preprocessing generated pocket"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# a. Extract pocket and substrate from pdb file \n",
"from Bio.PDB import *\n",
"from rdkit import Chem\n",
"import glob\n",
"from tqdm import tqdm\n",
"from ray.util.multiprocessing import Pool\n",
"import os\n",
"import ray\n",
"\n",
" \n",
"def single_extract_pocket(names):\n",
" bad_mol = 0\n",
" for name in names:\n",
" id = int(name.split(\"/\")[-1].split(\".\")[0])\n",
"\n",
" pocket_out_path = f\"{structure_dir}/str_tmp_data/pocket/{id}.pdb\"\n",
" ligand_out_path = f\"{structure_dir}/str_tmp_data/raw_ligand/{id}.sdf\"\n",
" lines = []\n",
" ed = 0\n",
" for index, line in enumerate(open(name, \"r\")):\n",
" lines.append(line)\n",
" if \"COMPND\" in line:\n",
" ed = index\n",
"\n",
" protein_lines = lines[:ed]\n",
" ligand_lines = lines[ed+1:]\n",
" ftmp_out = open(f\"{structure_dir}/str_tmp_data/raw_ligand/tmp_{id}.pdb\", \"w\")\n",
" for line in ligand_lines:\n",
" ftmp_out.write(line)\n",
" ftmp_out.close()\n",
"\n",
" mol = Chem.MolFromPDBFile(f\"{structure_dir}/str_tmp_data/raw_ligand/tmp_{id}.pdb\", flavor=1, sanitize=False)\n",
" # os.system(f\"rm -r {structure_dir}/str_tmp_data/raw_ligand/tmp_{id}.pdb\")\n",
" if mol is None:\n",
" print(f\"!!!!!{id}!!!!!\")\n",
" bad_mol += 1\n",
" continue\n",
"\n",
" ligand_coords = []\n",
" # mol = Chem.RemoveHs(mol, sanitize=False)\n",
" for i, atom in enumerate(mol.GetAtoms()):\n",
" positions = mol.GetConformer().GetAtomPosition(i)\n",
" ligand_coords.append((positions.x, positions.y, positions.z))\n",
" \n",
" try:\n",
" writer = Chem.SDWriter(ligand_out_path)\n",
" writer.write(mol, confId=0)\n",
" except:\n",
" bad_mol += 1\n",
" continue\n",
"\n",
" fin = open(pocket_out_path, \"w\")\n",
"\n",
" for line in protein_lines:\n",
" if \"ATOM\" in line:\n",
" try:\n",
" x = float(line[30:38])\n",
" y = float(line[38:46])\n",
" z = float(line[46:54])\n",
" for ligand_coord in ligand_coords:\n",
" if 'H' not in line[12:16].strip() and distance(x, y, z, ligand_coord[0], ligand_coord[1], ligand_coord[2]) < 10:\n",
" fin.write(line)\n",
" break\n",
" except:\n",
" continue\n",
" \n",
" elif \"HETATM\" in line or \"ENDMDL\" in line:\n",
" fin.write(line)\n",
" return bad_mol\n",
"\n",
"def distance(x1, y1, z1, x2, y2, z2):\n",
" return (1. * (x2 - x1) ** 2 + 1. * (y2 - y1) ** 2 + 1. * (z2 - z1) ** 2) ** 0.5\n",
"\n",
"\n",
"os.makedirs(f\"{structure_dir}/str_tmp_data\", exist_ok=True)\n",
"os.makedirs(f\"{structure_dir}/str_tmp_data/raw_ligand\", exist_ok=True)\n",
"os.makedirs(f\"{structure_dir}/str_tmp_data/pocket\", exist_ok=True)\n",
"\n",
"mol = None\n",
"\n",
"for name in tqdm(glob.glob(f\"{structure_dir}/*.pdb\")):\n",
" single_extract_pocket([name])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# b. Align substrate in docking pocket and substrate in smiles\n",
"from rdkit.Chem import rdFMCS\n",
"from rdkit.Chem import Draw\n",
"from rdkit.Chem import AllChem,rdDepictor\n",
"from rdkit import Chem\n",
"import pandas as pd\n",
"import glob\n",
"from rdkit import RDLogger\n",
"import ray\n",
"\n",
"def AssignBondOrdersFromTemplate(refmol, mol):\n",
" \"\"\" assigns bond orders to a molecule based on the\n",
" bond orders in a template molecule\n",
" Revised from AllChem.AssignBondOrderFromTemplate(refmol, mol)\n",
" \"\"\"\n",
" AllChem.AssignBondOrdersFromTemplate\n",
" refmol2 = Chem.rdchem.Mol(refmol)\n",
" mol2 = Chem.rdchem.Mol(mol)\n",
" # do the molecules match already?\n",
" matching = mol2.GetSubstructMatch(refmol2)\n",
" if not matching: # no, they don't match\n",
" # check if bonds of mol are SINGLE\n",
" for b in mol2.GetBonds():\n",
" if b.GetBondType() != Chem.BondType.SINGLE:\n",
" b.SetBondType(Chem.BondType.SINGLE)\n",
" b.SetIsAromatic(False)\n",
" # set the bonds of mol to SINGLE\n",
" for b in refmol2.GetBonds():\n",
" b.SetBondType(Chem.BondType.SINGLE)\n",
" b.SetIsAromatic(False)\n",
" # set atom charges to zero;\n",
" for a in refmol2.GetAtoms():\n",
" a.SetFormalCharge(0)\n",
" for a in mol2.GetAtoms():\n",
" a.SetFormalCharge(0)\n",
"\n",
" matching = mol2.GetSubstructMatches(refmol2, uniquify=False)\n",
" # do the molecules match now?\n",
" if matching:\n",
" if len(matching) > 1:\n",
" #logger.warning(\"More than one matching pattern found - picking one\")\n",
" pass\n",
" matchings=matching[:]\n",
" for matching in matchings:\n",
" #matching = matching[0] ## use each matching\n",
" # apply matching: set bond properties\n",
" for b in refmol.GetBonds():\n",
" atom1 = matching[b.GetBeginAtomIdx()]\n",
" atom2 = matching[b.GetEndAtomIdx()]\n",
" b2 = mol2.GetBondBetweenAtoms(atom1, atom2)\n",
" b2.SetBondType(b.GetBondType())\n",
" b2.SetIsAromatic(b.GetIsAromatic())\n",
" # apply matching: set atom properties\n",
" for a in refmol.GetAtoms():\n",
" a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])\n",
" a2.SetHybridization(a.GetHybridization())\n",
" a2.SetIsAromatic(a.GetIsAromatic())\n",
" a2.SetNumExplicitHs(a.GetNumExplicitHs())\n",
" a2.SetFormalCharge(a.GetFormalCharge())\n",
" try:\n",
" Chem.SanitizeMol(mol2)\n",
" if hasattr(mol2, '__sssAtoms'):\n",
" mol2.__sssAtoms = None # we don't want all bonds highlighted\n",
" break\n",
" except ValueError:\n",
" pass\n",
" # print(\"More than one matching pattern, Fail at this matching. Try next.\")\n",
" else:\n",
" raise ValueError(\"No matching found\")\n",
" return mol2\n",
"\n",
"def alignment_number_system(sdf, smile_mol):\n",
" \n",
" template = smile_mol\n",
" query = sdf\n",
"\n",
" mcs = rdFMCS.FindMCS([template, query], timeout=120)\n",
" patt = Chem.MolFromSmarts(mcs.smartsString)\n",
"\n",
" query_match = query.GetSubstructMatch(patt)\n",
" template_match = template.GetSubstructMatch(patt)\n",
"\n",
" result = [-1] * query.GetNumAtoms()\n",
"\n",
" for query_atom_id, template_atom_id in zip(query_match, template_match):\n",
" result[query_atom_id] = template_atom_id\n",
"\n",
" # Check if there is any atom not matched\n",
" for atom in query.GetAtoms():\n",
" assert atom.GetAtomicNum() == 1 or result[atom.GetIdx()] != -1\n",
"\n",
" return result\n",
"\n",
"def assign_idx(mol, idxs):\n",
" for atom, idx in zip(mol.GetAtoms(), idxs):\n",
" atom.SetAtomMapNum(idx)\n",
" return mol\n",
"\n",
"def mol_get_atomic_number(mol, atom_map=False):\n",
" result = [0] * mol.GetNumAtoms()\n",
" for atom in mol.GetAtoms():\n",
" if atom.GetAtomMapNum() != -1:\n",
" if atom_map:\n",
" result[atom.GetAtomMapNum()] = atom.GetAtomicNum()\n",
" else:\n",
" result[atom.GetIdx()] = atom.GetAtomicNum()\n",
" return result\n",
"\n",
"def check(mol, mol2):\n",
" for atom in mol.GetAtoms():\n",
" if atom.GetAtomMapNum() != -1:\n",
" id = atom.GetAtomMapNum()\n",
" \n",
" atom2 = mol2.GetAtomWithIdx(id)\n",
" if atom.GetAtomicNum() != atom2.GetAtomicNum():\n",
" return False\n",
" return True\n",
"\n",
"def view_difference(mol1, mol2):\n",
" mcs = rdFMCS.FindMCS([mol1,mol2])\n",
" mcs_mol = Chem.MolFromSmarts(mcs.smartsString)\n",
" match1 = mol1.GetSubstructMatch(mcs_mol)\n",
" target_atm1 = []\n",
" for atom in mol1.GetAtoms():\n",
" if atom.GetIdx() not in match1:\n",
" target_atm1.append(atom.GetIdx())\n",
" match2 = mol2.GetSubstructMatch(mcs_mol)\n",
" target_atm2 = []\n",
" for atom in mol2.GetAtoms():\n",
" if atom.GetIdx() not in match2:\n",
" target_atm2.append(atom.GetIdx())\n",
" return Draw.MolsToGridImage([mol1, mol2],highlightAtomLists=[target_atm1, target_atm2])\n",
"\n",
"def single_match(item):\n",
" id, smile = item\n",
" sdf_path = f\"{structure_dir}/str_tmp_data/raw_ligand/{id}.sdf\"\n",
" RDLogger.DisableLog('rdApp.*')\n",
"\n",
" try:\n",
" mol = next(iter(Chem.SDMolSupplier(sdf_path, sanitize=True)))\n",
" mol = Chem.RemoveHs(mol)\n",
" except Exception as e:\n",
" try:\n",
" mol = next(iter(Chem.SDMolSupplier(sdf_path, sanitize=False)))\n",
" mol = Chem.RemoveHs(mol, sanitize=False)\n",
" except Exception as e:\n",
" return 1 \n",
" # mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))\n",
" try:\n",
" smile_mol = Chem.MolFromSmiles(smile)\n",
" except Exception as e:\n",
" return 1\n",
" # print(mol_get_atomic_number(smile_mol))\n",
" # print(len(smile_mol.GetAtoms()))\n",
" try:\n",
" aligned_idx = alignment_number_system(mol, smile_mol)\n",
" except:\n",
" \n",
" try:\n",
" mol = AssignBondOrdersFromTemplate(smile_mol, mol)\n",
" except:\n",
" return 1\n",
" \n",
" try:\n",
" aligned_idx = alignment_number_system(mol, smile_mol)\n",
" except:\n",
" return 1\n",
" mol = assign_idx(mol, aligned_idx)\n",
" # print(mol_get_atomic_number(mol, atom_map=True))\n",
" if not check(mol, smile_mol):\n",
" return 1\n",
"\n",
" w = Chem.SDWriter(f\"{structure_dir}/str_tmp_data/ligand/{id}.sdf\")\n",
" try:\n",
" w.write(mol)\n",
" w.close()\n",
" except:\n",
" w.close()\n",
" return 1\n",
" \n",
" return 0\n",
"\n",
"def batched_match(items):\n",
" ans = 0\n",
" for item in items:\n",
" id, smile = item\n",
" try:\n",
" result = single_match(item)\n",
" ans += result\n",
" except:\n",
" ans += 1\n",
"\n",
" if not result:\n",
" os.system(f\"rm -r {structure_dir}/str_tmp_data/raw_ligand/{id}.sdf\")\n",
"\n",
" return ans\n",
"\n",
"from tqdm import tqdm\n",
"\n",
"import os\n",
"os.makedirs(f\"{structure_dir}/str_tmp_data/ligand\", exist_ok=True)\n",
"\n",
"df = pd.read_csv(metadata_csv_path)\n",
"print(df)\n",
"sub_index_dict = {index: substrate_id for index, substrate_id in zip(df[\"structure_index\"].values, df[\"reaction\"].values)}\n",
"\n",
"\n",
"sub_df = pd.read_csv(substrate_csv_path)\n",
"substrates = sub_df[\"Substrate_SMILES\"].values\n",
"\n",
"bad_molecular = 0\n",
"\n",
"parameters = []\n",
"for index, sdf_path in tqdm(enumerate(glob.glob(f\"{structure_dir}/str_tmp_data/raw_ligand/*.sdf\"))):\n",
" \n",
" id = os.path.basename(sdf_path).split(\".\")[0]\n",
" \n",
" if int(id) not in sub_index_dict:\n",
" continue\n",
"\n",
" substrate_id = int(sub_index_dict[int(id)])\n",
" smile = substrates[substrate_id]\n",
" parameters.append((int(id), smile))\n",
"\n",
"results = [batched_match(parameters[i*100: (i+1)*100]) for i in range(len(parameters) // 100)]\n",
"bad_molecular = 0\n",
"for result in results:\n",
" bad_molecular += result\n",
"print(bad_molecular)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"2. Create structure features"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from easydict import EasyDict as edict\n",
"from Datasets.Structure.structure import str_process, seq_process\n",
"import pandas as pd\n",
"dict = {\n",
" \"data_df_path\": metadata_csv_path,\n",
" \"ligand_df_path\": substrate_csv_path,\n",
" \"data\": {\n",
" \"structure_processed_path\": f\"{features_output_dir}/structure_features.lmdb\",\n",
" \"sequence_processed_path\": f\"{features_output_dir}/str_features.lmdb\",\n",
" \"pdb_dir\": f\"{structure_dir}/str_tmp_data/pocket\",\n",
" \"ligand_dir\": f\"{structure_dir}/str_tmp_data/ligand\"\n",
" }\n",
"}\n",
"config = edict(dict)\n",
"df = pd.read_csv(config.data_df_path)\n",
"str_process(config, df)\n",
"df = pd.read_csv(config.ligand_df_path)\n",
"seq_process(config, df)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generate prediction"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from easydict import EasyDict as edict\n",
"from main_testing import generate_prediction\n",
"from utils import load_config\n",
"\n",
"features_output_dir = \"/work/hdd/bbsm/suyufeng/enzyme_specificity/data/metabolite\"\n",
"prepared_data_dir = \"/work/hdd/bbsm/suyufeng/enzyme_specificity/data/metabolite\"\n",
"enzyme_csv_path = f\"{prepared_data_dir}/Enzymes.csv\"\n",
"substrate_csv_path = f\"{prepared_data_dir}/Substrates.csv\"\n",
"metadata_csv_path = f\"{prepared_data_dir}/data.csv\"\n",
"structure_dir = f\"{prepared_data_dir}/Structure\"\n",
"src_root_dir = \"/work/hdd/bbsm/suyufeng/enzyme_specificity\"\n",
"data_root_dir = \"/work/hdd/bbsm/suyufeng/enzyme_specificity\"\n",
"\n",
"dict = {\n",
" \"data\": {\n",
" \"tag\": \"example\",\n",
" \"representer\": \"structure_sequence\",\n",
" \"log_dir\": [\n",
" f\"{data_root_dir}/saved_model/complete-full-random-all-0-complex\",\n",
" f\"{data_root_dir}/saved_model/complete-full-random-all-1-complex\",\n",
" f\"{data_root_dir}/saved_model/complete-full-random-all-2-complex\",\n",
" f\"{data_root_dir}/saved_model/complete-full-random-all-3-complex\"\n",
" ],\n",
" \"train_data_path\": metadata_csv_path,\n",
" \"val_data_path\": metadata_csv_path,\n",
" \"test_data_path\": metadata_csv_path,\n",
" \"enzyme_lmdb_path\": f\"{features_output_dir}/enzyme_features.lmdb\",\n",
" \"reaction_lmdb_path\": f\"{features_output_dir}/reaction_features.lmdb\",\n",
" \"grover_path\": f\"{features_output_dir}/grover_fingerprint.lmdb\",\n",
" \"morgan_path\": f\"{features_output_dir}/morgan_fingerprint.npy\",\n",
" \"structure_processed_path\": f\"{features_output_dir}/structure_features.lmdb\",\n",
" \"sequence_processed_path\": f\"{features_output_dir}/str_features.lmdb\",\n",
" \"pdb_dir\": f\"{structure_dir}/str_tmp_data/pocket\",\n",
" \"ligand_dir\": f\"{structure_dir}/str_tmp_data/ligand\",\n",
" \"high_quality_id_path\": f\"{data_root_dir}/data/brenda/high_quality_zero.txt\",\n",
" \"full_data\": True,\n",
" \"fake_sequence_ratio\": 0,\n",
" \"sample_weight\": [1.0, 1.0],\n",
" \"batch_size\": 16,\n",
" \"max_substrate_length\": 280,\n",
" \"max_enzyme_length\": 1450,\n",
" \"features\": [\"morgan\", 1024, \"grover_mean\", 4885],\n",
" \"atom_features\": [\"grover\", 2400]\n",
" }\n",
"}\n",
"config = load_config(f\"{data_root_dir}/saved_model/complete-full-random-all-0-complex/run_0/complete-full-random-all-0-complex.yml\")\n",
"config.data = edict(dict).data\n",
"\n",
"score = generate_prediction(config)\n",
"print(score)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "ezspecificity",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| Unknown |
3D | antecede/EZSpecificity | main_specificity_ss_eval.py | .py | 3,059 | 103 | import sys
root_dir = "/projects/bbto/suyufeng/enzyme_specificity_public"
# Make the project sources importable before the project-local imports below.
sys.path.append(f"{root_dir}/src")
import pytorch_lightning as pl
from Datasets.brenda import Singledataset
import torch, sys, glob
import torch.multiprocessing
from rdkit import RDLogger
import warnings
from sklearn import metrics
import numpy as np
from absl import flags
from Models.utils import load_model
from utils import load_config
from Models.ss import SS
# Silence RDKit chatter and avoid "too many open files" with many workers.
RDLogger.DisableLog('rdApp.*')
torch.multiprocessing.set_sharing_strategy('file_descriptor')
warnings.filterwarnings("ignore")
# Command-line flag: training log directory containing the saved config
# (*.yml) and the checkpoints under models/.
flags.DEFINE_string('log_dir', None, 'Config path.')
FLAGS = flags.FLAGS
FLAGS(sys.argv)
assert FLAGS.log_dir is not None
log_dirs = [FLAGS.log_dir]
# Evaluation only: disable any training-time logging behavior.
log = False
print(glob.glob(f"{log_dirs[0]}/*.yml"))
# Use the first YAML file found in the log directory as the config.
config_path = glob.glob(f"{log_dirs[0]}/*.yml")[0]
print(config_path)
config = load_config(config_path)
config.device = torch.device("cuda:0")
# fix random seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
# Accumulators: overall logits/labels plus per-key (per-subset) variants,
# ensembled over all evaluated checkpoints.
full_logits = None
full_labels = None
full_logits_dict = {}
full_labels_dict = {}
num_runs = 0
# Evaluate every saved checkpoint and accumulate its test logits so the
# final prediction is an ensemble over checkpoints.
for log_dir in log_dirs:
    for resume_checkpoint_path in glob.glob(f"{log_dir}/models/*.ckpt"):
        print(resume_checkpoint_path)
        # Skip versioned duplicates like "best-checkpoint-v1.ckpt".
        if "-v" in resume_checkpoint_path:
            continue
        config.num_cpus = 16
        dm = Singledataset(config)
        model = SS.load_from_checkpoint(resume_checkpoint_path, config=config)
        trainer_paras = {
            'gpus': 1,
            'num_nodes': 1,
            'gradient_clip_val': config.training.gradient_clip_val,
            'reload_dataloaders_every_n_epochs': False,
            'accumulate_grad_batches': config.training.accumulate_grad_batches,
            'accelerator': 'gpu',
            'strategy': "ddp"
        }
        trainer = pl.Trainer(**trainer_paras)
        trainer.test(model, datamodule=dm)
        # The model collects predictions during test; presumably numpy
        # arrays plus per-key dicts — TODO confirm against Models.ss.SS.
        logits = model.logits
        label = model.labels
        logits_dict = model.logits_dict
        labels_dict = model.labels_dict
        # Sum logits over checkpoints; labels are assumed identical for
        # every checkpoint, so keeping the latest set suffices.
        if full_logits is None:
            full_logits = logits
            full_labels = label
        else:
            full_logits = full_logits + logits
            full_labels = label
        num_runs += 1
        # Per-key accumulation; labels are captured from the first
        # checkpoint only (assumed constant across checkpoints).
        for key in logits_dict:
            if key not in full_logits_dict:
                full_logits_dict[key] = np.array(logits_dict[key])
                full_labels_dict[key] = np.array(labels_dict[key])
            else:
                full_logits_dict[key] += np.array(logits_dict[key])
def get_auc_apr(logits, labels, tag):
    """Print ROC-AUC and average precision for flattened predictions,
    prefixed with *tag*."""
    scores = logits.ravel()
    truth = labels.ravel()
    fpr, tpr, _thresholds = metrics.roc_curve(truth, scores, pos_label=1)
    auroc = metrics.auc(fpr, tpr)
    aupr = metrics.average_precision_score(truth, scores)
    print(f"{tag} auroc: {auroc}")
    print(f"{tag} aupr: {aupr}")
# Average the summed logits over the number of evaluated checkpoints.
# NOTE(review): if no checkpoint matched the glob above, full_logits is
# still None and num_runs is 0, so this line fails — verify upstream.
full_logits = full_logits / num_runs
get_auc_apr(full_logits, full_labels, "overall")
# Per-key logits remain summed (never divided); AUROC/AUPR are rank-based
# and scale-invariant, so this does not change the reported metrics.
for key in full_logits_dict:
    get_auc_apr(full_logits_dict[key], full_labels_dict[key], key)
np.savetxt(f"{config.data.tag}_logits.txt", full_logits)
3D | antecede/EZSpecificity | utils.py | .py | 1,337 | 41 | import os
import time
from easydict import EasyDict
import yaml
from logging import Logger
import logging
def get_logger(name, log_dir=None):
    """Return a DEBUG-level logger with a console handler and, if *log_dir*
    is given, a file handler writing to ``log_dir/log.txt``.

    Fix: the original added a new StreamHandler (and FileHandler) on every
    call, so repeated calls with the same name — as done per run in the
    training scripts — duplicated every log line. Handlers are now added
    only if an equivalent one is not already attached.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(asctime)s::%(name)s::%(levelname)s] %(message)s')
    # Add the console handler once. FileHandler subclasses StreamHandler,
    # so compare exact types to not mistake a file handler for it.
    if not any(type(handler) is logging.StreamHandler
               for handler in logger.handlers):
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
    if log_dir is not None:
        log_path = os.path.abspath(os.path.join(log_dir, 'log.txt'))
        # Add a file handler only if none is attached for this path yet.
        if not any(isinstance(handler, logging.FileHandler)
                   and handler.baseFilename == log_path
                   for handler in logger.handlers):
            file_handler = logging.FileHandler(log_path)
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
    return logger
def get_new_log_dir(root='./logs', prefix='', tag=''):
    """Create and return a fresh timestamped log directory.

    The layout is ``root/YYYY_MM_DD/[prefix_]HH_MM_SS[_tag]``; missing
    intermediate directories are created.
    """
    now = time.localtime()
    day_part = time.strftime('%Y_%m_%d', now)
    name = time.strftime('%H_%M_%S', now)
    if prefix != '':
        name = f'{prefix}_{name}'
    if tag != '':
        name = f'{name}_{tag}'
    log_dir = os.path.join(root, day_part, name)
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
def load_config(path):
    """Parse the YAML file at *path* into an attribute-accessible EasyDict."""
    with open(path, 'r') as handle:
        raw = yaml.safe_load(handle)
    return EasyDict(raw)
3D | antecede/EZSpecificity | other_softwares/grover_software/main.py | .py | 1,865 | 56 | import random
import numpy as np
import torch
from rdkit import RDLogger
from grover.util.parsing import parse_args, get_newest_train_args
from grover.util.utils import create_logger
from task.cross_validate import cross_validate
from task.fingerprint import generate_fingerprints
from task.predict import make_predictions, write_prediction
from task.pretrain import pretrain_model
from grover.data.torchvocab import MolVocab
def setup(seed):
# frozen random seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
    # Fix all random seeds for reproducible runs.
    setup(seed=42)
    # Reference MolVocab so linters/IDEs keep the import.
    a = MolVocab
    # Suppress RDKit log output below CRITICAL.
    lg = RDLogger.logger()
    lg.setLevel(RDLogger.CRITICAL)
    # Initialize MolVocab (kept alongside `a` above; both only pin the import).
    mol_vocab = MolVocab
    args = parse_args()
    # Dispatch to the requested task based on the sub-parser name.
    if args.parser_name == 'finetune':
        logger = create_logger(name='train', save_dir=args.save_dir, quiet=False)
        cross_validate(args, logger)
    elif args.parser_name == 'pretrain':
        logger = create_logger(name='pretrain', save_dir=args.save_dir)
        pretrain_model(args, logger)
    elif args.parser_name == "eval":
        logger = create_logger(name='eval', save_dir=args.save_dir, quiet=False)
        cross_validate(args, logger)
    elif args.parser_name == 'fingerprint':
        train_args = get_newest_train_args()
        logger = create_logger(name='fingerprint', save_dir=None, quiet=False)
        feas = generate_fingerprints(args, logger)
        # np.savez_compressed(args.output_path, fps=feas)
    elif args.parser_name == 'predict':
        train_args = get_newest_train_args()
        avg_preds, test_smiles = make_predictions(args, train_args)
        write_prediction(avg_preds, test_smiles, args)
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/run_evaluation.py | .py | 5,581 | 158 | """
The evaluation function.
"""
from argparse import Namespace
from logging import Logger
from typing import List
import numpy as np
import torch
import torch.utils.data.distributed
from grover.data.scaler import StandardScaler
from grover.util.utils import get_class_sizes, get_data, split_data, get_task_names, get_loss_func
from grover.util.utils import load_checkpoint
from task.predict import evaluate_predictions
from grover.util.metrics import get_metric_func
from grover.util.nn_utils import param_count
from task.predict import predict
def run_evaluation(args: Namespace, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    # Fall back to print() when no logger is supplied.
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print
    torch.cuda.set_device(0)
    # Get data and record task/feature metadata on args as a side effect.
    debug('Loading data')
    args.task_names = get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')
    # Split data (80/10/10 with the configured split type and seed).
    debug(f'Splitting data with seed {args.seed}')
    train_data, val_data, test_data = split_data(data=data,
                                                 split_type=args.split_type,
                                                 sizes=[0.8, 0.1, 0.1],
                                                 seed=args.seed,
                                                 args=args,
                                                 logger=logger)
    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
    # Feature scaler is fit on train only, then applied to val/test.
    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None
    args.train_data_size = len(train_data)
    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
    # Initialize scaler (regression only); targets of train/val are rescaled in place.
    scaler = None
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        _, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
        val_targets = val_data.targets()
        scaled_val_targets = scaler.transform(val_targets).tolist()
        val_data.set_targets(scaled_val_targets)
    metric_func = get_metric_func(metric=args.metric)
    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
    # Load the checkpoint whose path contains "fold_<seed>".
    # NOTE(review): `target_path` stays [] when no path matches, and `model` is
    # never defined when checkpoint_paths is None — both would fail below;
    # confirm callers always provide a matching checkpoint.
    if args.checkpoint_paths is not None:
        cur_model = args.seed
        target_path = []
        for path in args.checkpoint_paths:
            if "fold_%d" % cur_model in path:
                target_path = path
        debug(f'Loading model {args.seed} from {target_path}')
        model = load_checkpoint(target_path, current_args=args, cuda=args.cuda, logger=logger)
    # Get loss and metric functions
    loss_func = get_loss_func(args, model)
    debug(f'Number of parameters = {param_count(model):,}')
    test_preds, _ = predict(
        model=model,
        data=test_data,
        batch_size=args.batch_size,
        loss_func=loss_func,
        logger=logger,
        shared_dict={},
        scaler=scaler,
        args=args
    )
    test_scores = evaluate_predictions(
        preds=test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )
    if len(test_preds) != 0:
        sum_test_preds += np.array(test_preds, dtype=float)
    # Average test score across tasks (NaN-safe).
    avg_test_score = np.nanmean(test_scores)
    info(f'Model test {args.metric} = {avg_test_score:.6f}')
    if args.show_individual_scores:
        # Individual test scores
        for task_name, test_score in zip(args.task_names, test_scores):
            info(f'Model test {task_name} {args.metric} = {test_score:.6f}')
    # Evaluate ensemble on test set.
    # NOTE(review): only one model is evaluated here, so with ensemble_size > 1
    # the division scales predictions down — confirm ensemble_size is 1 here.
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
    ensemble_scores = evaluate_predictions(
        preds=avg_test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )
    # If you want to save the prediction result, uncomment these lines.
    # ind = [['preds'] * args.num_tasks + ['targets'] * args.num_tasks, args.task_names * 2]
    # ind = pd.MultiIndex.from_tuples(list(zip(*ind)))
    # data = np.concatenate([np.array(avg_test_preds), np.array(test_targets)], 1)
    # test_result = pd.DataFrame(data, index=test_smiles, columns=ind)
    # test_result.to_csv(os.path.join(args.save_dir, 'test_result.csv'))
    return ensemble_scores
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/train.py | .py | 16,779 | 439 | """
The training function used in the finetuning task.
"""
import csv
import logging
import os
import pickle
import time
from argparse import Namespace
from logging import Logger
from typing import List
import numpy as np
import pandas as pd
import torch
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from grover.data import MolCollator
from grover.data import StandardScaler
from grover.util.metrics import get_metric_func
from grover.util.nn_utils import initialize_weights, param_count
from grover.util.scheduler import NoamLR
from grover.util.utils import build_optimizer, build_lr_scheduler, makedirs, load_checkpoint, get_loss_func, \
save_checkpoint, build_model
from grover.util.utils import get_class_sizes, get_data, split_data, get_task_names
from task.predict import predict, evaluate, evaluate_predictions
def train(epoch, model, data, loss_func, optimizer, scheduler,
          shared_dict, args: Namespace, n_iter: int = 0,
          logger: logging.Logger = None):
    """
    Trains a model for an epoch.

    :param epoch: the current epoch number (kept for interface compatibility; unused here).
    :param model: Model.
    :param data: A MoleculeDataset, or an already-built DataLoader to reuse as-is.
    :param loss_func: Loss function.
    :param optimizer: An Optimizer.
    :param scheduler: A learning rate scheduler.
    :param shared_dict: dict shared with the MolCollator across workers.
    :param args: Arguments.
    :param n_iter: The number of iterations (training examples) trained on so far.
    :param logger: A logger for printing intermediate results.
    :return: (total number of iterations trained on so far, mean batch loss of this epoch).
    """
    model.train()
    loss_sum, iter_count = 0, 0
    cum_loss_sum, cum_iter_count = 0, 0
    mol_collator = MolCollator(shared_dict=shared_dict, args=args)
    num_workers = 4
    # Reuse the caller's DataLoader when one is passed. isinstance (rather
    # than an exact type comparison) also accepts DataLoader subclasses.
    if isinstance(data, DataLoader):
        mol_loader = data
    else:
        mol_loader = DataLoader(data, batch_size=args.batch_size, shuffle=True,
                                num_workers=num_workers, collate_fn=mol_collator)
    for item in mol_loader:
        _, batch, features_batch, mask, targets = item
        if next(model.parameters()).is_cuda:
            mask, targets = mask.cuda(), targets.cuda()
        class_weights = torch.ones(targets.shape)
        if args.cuda:
            class_weights = class_weights.cuda()
        # Run model
        model.zero_grad()
        preds = model(batch, features_batch)
        # Masked, class-weighted mean loss over the valid target entries.
        loss = loss_func(preds, targets) * class_weights * mask
        loss = loss.sum() / mask.sum()
        loss_sum += loss.item()
        iter_count += args.batch_size
        cum_loss_sum += loss.item()
        cum_iter_count += 1
        loss.backward()
        optimizer.step()
        # NoamLR steps per batch; other schedulers are stepped per epoch by the caller.
        if isinstance(scheduler, NoamLR):
            scheduler.step()
        n_iter += args.batch_size
    return n_iter, cum_loss_sum / cum_iter_count
def run_training(args: Namespace, time_start, logger: Logger = None) -> List[float]:
    """
    Trains a model and returns test scores on the model checkpoint with the highest validation score.

    :param args: Arguments.
    :param time_start: wall-clock start time of the run (not used in this function).
    :param logger: Logger.
    :return: A list of ensemble scores for each task.
    """
    if logger is not None:
        debug, info = logger.debug, logger.info
    else:
        debug = info = print
    # pin GPU to local rank.
    idx = args.gpu
    if args.gpu is not None:
        torch.cuda.set_device(idx)
    features_scaler, scaler, shared_dict, test_data, train_data, val_data = load_data(args, debug, logger)
    metric_func = get_metric_func(metric=args.metric)
    # Set up test set evaluation
    test_smiles, test_targets = test_data.smiles(), test_data.targets()
    sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
    # Train ensemble of models
    for model_idx in range(args.ensemble_size):
        # Per-model output directory.
        save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
        makedirs(save_dir)
        # Load/build model
        if args.checkpoint_paths is not None:
            if len(args.checkpoint_paths) == 1:
                cur_model = 0
            else:
                cur_model = model_idx
            debug(f'Loading model {cur_model} from {args.checkpoint_paths[cur_model]}')
            model = load_checkpoint(args.checkpoint_paths[cur_model], current_args=args, logger=logger)
        else:
            debug(f'Building model {model_idx}')
            model = build_model(model_idx=model_idx, args=args)
        if args.fine_tune_coff != 1 and args.checkpoint_paths is not None:
            debug("Fine tune fc layer with different lr")
            initialize_weights(model_idx=model_idx, model=model.ffn, distinct_init=args.distinct_init)
        # Get loss and metric functions
        loss_func = get_loss_func(args, model)
        optimizer = build_optimizer(model, args)
        debug(model)
        debug(f'Number of parameters = {param_count(model):,}')
        if args.cuda:
            debug('Moving model to cuda')
            model = model.cuda()
        # Ensure that model is saved in correct location for evaluation if 0 epochs
        save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
        # Learning rate schedulers
        scheduler = build_lr_scheduler(optimizer, args)
        # Build data_loader
        shuffle = True
        mol_collator = MolCollator(shared_dict={}, args=args)
        # NOTE(review): train_data is rebound to a DataLoader here, so on the
        # second ensemble iteration a DataLoader wraps a DataLoader — confirm
        # behavior when ensemble_size > 1.
        train_data = DataLoader(train_data,
                                batch_size=args.batch_size,
                                shuffle=shuffle,
                                num_workers=10,
                                collate_fn=mol_collator)
        # Run training
        best_score = float('inf') if args.minimize_score else -float('inf')
        best_epoch, n_iter = 0, 0
        min_val_loss = float('inf')
        for epoch in range(args.epochs):
            s_time = time.time()
            n_iter, train_loss = train(
                epoch=epoch,
                model=model,
                data=train_data,
                loss_func=loss_func,
                optimizer=optimizer,
                scheduler=scheduler,
                args=args,
                n_iter=n_iter,
                shared_dict=shared_dict,
                logger=logger
            )
            t_time = time.time() - s_time
            s_time = time.time()
            val_scores, val_loss = evaluate(
                model=model,
                data=val_data,
                loss_func=loss_func,
                num_tasks=args.num_tasks,
                metric_func=metric_func,
                batch_size=args.batch_size,
                dataset_type=args.dataset_type,
                scaler=scaler,
                shared_dict=shared_dict,
                logger=logger,
                args=args
            )
            v_time = time.time() - s_time
            # Average validation score across tasks (NaN-safe).
            avg_val_score = np.nanmean(val_scores)
            # Logged after lr step
            if isinstance(scheduler, ExponentialLR):
                scheduler.step()
            if args.show_individual_scores:
                # Individual validation scores
                for task_name, val_score in zip(args.task_names, val_scores):
                    debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
            print('Epoch: {:04d}'.format(epoch),
                  'loss_train: {:.6f}'.format(train_loss),
                  'loss_val: {:.6f}'.format(val_loss),
                  f'{args.metric}_val: {avg_val_score:.4f}',
                  # 'auc_val: {:.4f}'.format(avg_val_score),
                  'cur_lr: {:.5f}'.format(scheduler.get_lr()[-1]),
                  't_time: {:.4f}s'.format(t_time),
                  'v_time: {:.4f}s'.format(v_time))
            # NOTE(review): `writer` is never defined in this function, so
            # enabling args.tensorboard raises NameError — a SummaryWriter
            # needs to be created per model before this block is usable.
            if args.tensorboard:
                writer.add_scalar('loss/train', train_loss, epoch)
                writer.add_scalar('loss/val', val_loss, epoch)
                writer.add_scalar(f'{args.metric}_val', avg_val_score, epoch)
            # Save model checkpoint if improved validation score
            if args.select_by_loss:
                if val_loss < min_val_loss:
                    min_val_loss, best_epoch = val_loss, epoch
                    save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
            else:
                if args.minimize_score and avg_val_score < best_score or \
                        not args.minimize_score and avg_val_score > best_score:
                    best_score, best_epoch = avg_val_score, epoch
                    save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
            # Early stopping on epochs without improvement.
            if epoch - best_epoch > args.early_stop_epoch:
                break
        ensemble_scores = 0.0
        # Evaluate on test set using model with best validation score
        if args.select_by_loss:
            info(f'Model {model_idx} best val loss = {min_val_loss:.6f} on epoch {best_epoch}')
        else:
            info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
        model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
        test_preds, _ = predict(
            model=model,
            data=test_data,
            loss_func=loss_func,
            batch_size=args.batch_size,
            logger=logger,
            shared_dict=shared_dict,
            scaler=scaler,
            args=args
        )
        test_scores = evaluate_predictions(
            preds=test_preds,
            targets=test_targets,
            num_tasks=args.num_tasks,
            metric_func=metric_func,
            dataset_type=args.dataset_type,
            logger=logger
        )
        if len(test_preds) != 0:
            sum_test_preds += np.array(test_preds, dtype=float)
        # Average test score
        avg_test_score = np.nanmean(test_scores)
        info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
        if args.show_individual_scores:
            # Individual test scores
            for task_name, test_score in zip(args.task_names, test_scores):
                info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
    # Evaluate ensemble on test set
    avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
    ensemble_scores = evaluate_predictions(
        preds=avg_test_preds,
        targets=test_targets,
        num_tasks=args.num_tasks,
        metric_func=metric_func,
        dataset_type=args.dataset_type,
        logger=logger
    )
    # Save per-molecule predictions and targets side by side.
    ind = [['preds'] * args.num_tasks + ['targets'] * args.num_tasks, args.task_names * 2]
    ind = pd.MultiIndex.from_tuples(list(zip(*ind)))
    data = np.concatenate([np.array(avg_test_preds), np.array(test_targets)], 1)
    test_result = pd.DataFrame(data, index=test_smiles, columns=ind)
    test_result.to_csv(os.path.join(args.save_dir, 'test_result.csv'))
    # Average ensemble score
    avg_ensemble_test_score = np.nanmean(ensemble_scores)
    info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
    # Individual ensemble scores
    if args.show_individual_scores:
        for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
            info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
    return ensemble_scores
def load_data(args, debug, logger):
    """
    Load, split, and scale the training data.

    :param args: Arguments; task_names, features_dim, num_tasks, features_size
        and train_data_size are filled in as a side effect.
    :param debug: debug-level logging function.
    :param logger: Logger passed through to the data helpers.
    :return: (features_scaler, scaler, shared_dict, test_data, train_data, val_data)
    """
    # Get data
    debug('Loading data')
    args.task_names = get_task_names(args.data_path)
    data = get_data(path=args.data_path, args=args, logger=logger)
    if data.data[0].features is not None:
        args.features_dim = len(data.data[0].features)
    else:
        args.features_dim = 0
    shared_dict = {}
    args.num_tasks = data.num_tasks()
    args.features_size = data.features_size()
    debug(f'Number of tasks = {args.num_tasks}')
    # Split data
    debug(f'Splitting data with seed {args.seed}')
    if args.separate_test_path:
        test_data = get_data(path=args.separate_test_path, args=args,
                             features_path=args.separate_test_features_path, logger=logger)
    if args.separate_val_path:
        val_data = get_data(path=args.separate_val_path, args=args,
                            features_path=args.separate_val_features_path, logger=logger)
    if args.separate_val_path and args.separate_test_path:
        train_data = data
    elif args.separate_val_path:
        # BUGFIX: with a separate validation set, the remaining data must be
        # split into train and TEST. The previous sizes (0.8, 0.2, 0.0) put
        # the 20% share into the discarded middle slot and assigned the empty
        # third slot to test_data, leaving an empty test set.
        train_data, _, test_data = split_data(data=data, split_type=args.split_type,
                                              sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
    elif args.separate_test_path:
        train_data, val_data, _ = split_data(data=data, split_type=args.split_type,
                                             sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
    else:
        train_data, val_data, test_data = split_data(data=data, split_type=args.split_type,
                                                     sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
    if args.dataset_type == 'classification':
        class_sizes = get_class_sizes(data)
        debug('Class sizes')
        for i, task_class_sizes in enumerate(class_sizes):
            debug(f'{args.task_names[i]} '
                  f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
    #if args.save_smiles_splits:
    #    save_splits(args, test_data, train_data, val_data)
    # Feature scaler is fit on train only, then applied to val/test.
    if args.features_scaling:
        features_scaler = train_data.normalize_features(replace_nan_token=0)
        val_data.normalize_features(features_scaler)
        test_data.normalize_features(features_scaler)
    else:
        features_scaler = None
    args.train_data_size = len(train_data)
    debug(f'Total size = {len(data):,} | '
          f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
    # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
    if args.dataset_type == 'regression':
        debug('Fitting scaler')
        _, train_targets = train_data.smiles(), train_data.targets()
        scaler = StandardScaler().fit(train_targets)
        scaled_targets = scaler.transform(train_targets).tolist()
        train_data.set_targets(scaled_targets)
        val_targets = val_data.targets()
        scaled_val_targets = scaler.transform(val_targets).tolist()
        val_data.set_targets(scaled_val_targets)
    else:
        scaler = None
    return features_scaler, scaler, shared_dict, test_data, train_data, val_data
def save_splits(args, test_data, train_data, val_data):
    """
    Write the train/val/test split to disk.

    For each split this saves ``<name>_smiles.csv`` (smiles only) and
    ``<name>_full.csv`` (the original data rows), plus ``split_indices.pckl``
    holding the sorted row indices of every split.

    :param args: arguments holding ``data_path`` and ``save_dir``.
    :param test_data: dataset exposing ``smiles()``.
    :param train_data: dataset exposing ``smiles()``.
    :param val_data: dataset exposing ``smiles()``.
    :return: the csv writer of the last file written (kept for interface compatibility).
    """
    # Index the source CSV by its smiles column (column 0).
    row_by_smiles = {}
    index_by_smiles = {}
    with open(args.data_path, 'r') as src:
        reader = csv.reader(src)
        header = next(reader)
        for row_idx, row in enumerate(reader):
            row_by_smiles[row[0]] = row
            index_by_smiles[row[0]] = row_idx
    all_split_indices = []
    for dataset, name in ((train_data, 'train'), (val_data, 'val'), (test_data, 'test')):
        split_smiles = dataset.smiles()
        # smiles-only listing of this split.
        with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as out:
            writer = csv.writer(out)
            writer.writerow(['smiles'])
            for smi in split_smiles:
                writer.writerow([smi])
        # Full original rows of this split.
        with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as out:
            writer = csv.writer(out)
            writer.writerow(header)
            for smi in split_smiles:
                writer.writerow(row_by_smiles[smi])
        all_split_indices.append(sorted(index_by_smiles[smi] for smi in split_smiles))
    with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as out:
        pickle.dump(all_split_indices, out)
    return writer
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/fingerprint.py | .py | 3,113 | 100 | """
The fingerprint generation function.
"""
from argparse import Namespace
from logging import Logger
from typing import List
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from grover.data import MolCollator
from grover.data import MoleculeDataset
from grover.util.utils import get_data, create_logger, load_checkpoint
def do_generate(model: nn.Module,
                data: MoleculeDataset,
                args: Namespace,
                ) -> List[List[float]]:
    """
    Do the fingerprint generation on a dataset using the pre-trained models.

    Writes per-molecule embeddings into an LMDB database at
    ``args.save_lmdb_path`` as a side effect.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param args: Arguments (``save_lmdb_path``, ``bond_drop_rate`` is reset to 0).
    :return: A list of fingerprints.
        NOTE(review): `preds` is never appended to below, so this always
        returns an empty list; the real output is the LMDB database.
    """
    import lmdb
    model.eval()
    args.bond_drop_rate = 0
    preds = []
    mol_collator = MolCollator(args=args, shared_dict={})
    num_workers = 4
    mol_loader = DataLoader(data,
                            batch_size=32,
                            shuffle=False,
                            num_workers=num_workers,
                            collate_fn=mol_collator)
    env = lmdb.open(
        args.save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    st = 0
    for item in mol_loader:
        import pickle
        import numpy as np
        _, batch, features_batch, _, _ = item
        with torch.no_grad():
            # a_scope holds (start, size) per molecule in the flat atom tensor.
            _, _, _, _, _, a_scope, _, _ = batch
            batch_preds = model(batch, features_batch)
            for id, i in enumerate(a_scope):
                with env.begin(write=True, buffers=True) as txn:
                    # Per-atom embeddings (duplicated along axis 1) plus the
                    # molecule-level embedding, keyed by global molecule index.
                    data = {
                        'embedding': np.concatenate((batch_preds[0][i[0]:i[0]+i[1]].data.cpu().numpy(), batch_preds[0][i[0]:i[0]+i[1]].data.cpu().numpy()), axis=1),
                        'total_embedding': batch_preds[2][id].cpu().numpy(),
                    }
                    txn.put(key=str(id+st).encode(), value=pickle.dumps(data))
        # NOTE(review): keys advance by the fixed batch size 32; a final
        # partial batch leaves a gap in the key space — confirm readers allow it.
        st += 32
    return preds
def generate_fingerprints(args: Namespace, logger: Logger = None) -> List[List[float]]:
    """
    Generate fingerprints for the dataset at ``args.data_path`` using the
    first checkpoint in ``args.checkpoint_paths``.

    :param args: Arguments.
    :param logger: optional logger; a default one is created when missing.
    :return: A list of lists of target fingerprints.
    """
    ckpt = args.checkpoint_paths[0]
    if logger is None:
        logger = create_logger('fingerprints', quiet=False)
    print('Loading data')
    raw_data = get_data(path=args.data_path,
                        args=args,
                        use_compound_names=False,
                        max_data_size=float("inf"),
                        skip_invalid_smiles=False)
    dataset = MoleculeDataset(raw_data)
    logger.info(f'Total size = {len(dataset):,}')
    logger.info(f'Generating...')
    # Restore the pre-trained model, then run fingerprint generation with it.
    net = load_checkpoint(ckpt, cuda=args.cuda, current_args=args, logger=logger)
    return do_generate(model=net, data=dataset, args=args)
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/pretrain.py | .py | 9,571 | 242 | """
The GROVER pretrain function.
"""
import os
import time
from argparse import Namespace
from logging import Logger
import torch
from torch.utils.data import DataLoader
from grover.data.dist_sampler import DistributedSampler
from grover.data.groverdataset import get_data, split_data, GroverCollator, BatchMolDataset
from grover.data.torchvocab import MolVocab
from grover.model.models import GROVEREmbedding
from grover.util.multi_gpu_wrapper import MultiGpuWrapper as mgw
from grover.util.nn_utils import param_count
from grover.util.utils import build_optimizer, build_lr_scheduler
from task.grovertrainer import GROVERTrainer
def pretrain_model(args: Namespace, logger: Logger = None):
    """
    Entry point of the pretraining task: runs training and reports wall time.

    :param args: the parsed arguments.
    :param logger: the logger (optional).
    """
    # Reference MolVocab so IDEs/linters do not strip the import as unused.
    _ = MolVocab
    start = time.time()
    run_training(args=args, logger=logger)
    print("Total Time: %.3f" % (time.time() - start))
def pre_load_data(dataset: BatchMolDataset, rank: int, num_replicas: int, sample_per_file: int = None, epoch: int = 0):
    """
    Pre-load the data files this worker needs for the coming epoch.

    A non-shuffling DistributedSampler is used solely to compute which indices
    belong to this rank; the corresponding data files are then loaded.

    :param dataset: the training dataset.
    :param rank: the rank of the current worker.
    :param num_replicas: the number of replicas.
    :param sample_per_file: data points per file; when None, all data is
        loaded (implies the testing phase).
    :param epoch: the epoch number.
    """
    sampler = DistributedSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=False,
                                 sample_per_file=sample_per_file)
    sampler.set_epoch(epoch)
    for idx in sampler.get_indices():
        dataset.load_data(idx)
def run_training(args, logger):
    """
    Run the pretrain task.

    :param args: parsed arguments (data paths, vocab paths, multi-GPU flags, ...).
    :param logger: optional logger; falls back to print().
    :return: None; checkpoints are written under ``<args.save_dir>/model``.
    """
    # Initialize the logger.
    if logger is not None:
        debug, _ = logger.debug, logger.info
    else:
        debug = print
    # Initialize the horovod library.
    if args.enable_multi_gpu:
        mgw.init()
    # Rank 0 does logging/checkpointing duties.
    master_worker = (mgw.rank() == 0) if args.enable_multi_gpu else True
    # Pin GPU to local rank. By default, we use gpu:0 for training.
    local_gpu_idx = mgw.local_rank() if args.enable_multi_gpu else 0
    with_cuda = args.cuda
    if with_cuda:
        torch.cuda.set_device(local_gpu_idx)
    # Get rank and number of workers.
    rank = mgw.rank() if args.enable_multi_gpu else 0
    num_replicas = mgw.size() if args.enable_multi_gpu else 1
    # print("Rank: %d Rep: %d" % (rank, num_replicas))
    # Load file paths of the data.
    if master_worker:
        print(args)
        if args.enable_multi_gpu:
            debug("Total workers: %d" % (mgw.size()))
        debug('Loading data')
    data, sample_per_file = get_data(data_path=args.data_path)
    # Data splitting: 90% train / 10% validation, fixed seed 0 so every rank
    # sees the same split.
    if master_worker:
        debug(f'Splitting data with seed 0.')
    train_data, test_data, _ = split_data(data=data, sizes=(0.9, 0.1, 0.0), seed=0, logger=logger)
    # Here the true train data size is the train_data divided by #GPUs
    if args.enable_multi_gpu:
        args.train_data_size = len(train_data) // mgw.size()
    else:
        args.train_data_size = len(train_data)
    if master_worker:
        debug(f'Total size = {len(data):,} | '
              f'train size = {len(train_data):,} | val size = {len(test_data):,}')
    # Load atom and bond vocabulary and the semantic motif labels.
    atom_vocab = MolVocab.load_vocab(args.atom_vocab_path)
    bond_vocab = MolVocab.load_vocab(args.bond_vocab_path)
    atom_vocab_size, bond_vocab_size = len(atom_vocab), len(bond_vocab)
    # Hard coding here, since we haven't load any data yet!
    fg_size = 85
    shared_dict = {}
    mol_collator = GroverCollator(shared_dict=shared_dict, atom_vocab=atom_vocab, bond_vocab=bond_vocab, args=args)
    if master_worker:
        debug("atom vocab size: %d, bond vocab size: %d, Number of FG tasks: %d" % (atom_vocab_size,
                                                                                    bond_vocab_size, fg_size))
    # Define the distributed sampler. If using the single card, the sampler will be None.
    train_sampler = None
    test_sampler = None
    shuffle = True
    if args.enable_multi_gpu:
        # If not shuffled, the performance may decay.
        train_sampler = DistributedSampler(
            train_data, num_replicas=mgw.size(), rank=mgw.rank(), shuffle=True, sample_per_file=sample_per_file)
        # Here sample_per_file in test_sampler is None, indicating the test sampler would not divide the test samples by
        # rank. (TODO: bad design here.)
        test_sampler = DistributedSampler(
            test_data, num_replicas=mgw.size(), rank=mgw.rank(), shuffle=False)
        train_sampler.set_epoch(args.epochs)
        test_sampler.set_epoch(1)
        # If we enable multi_gpu training, shuffling must be left to the sampler.
        shuffle = False
    # Pre-load data. (Maybe unnecessary.)
    pre_load_data(train_data, rank, num_replicas, sample_per_file)
    pre_load_data(test_data, rank, num_replicas)
    if master_worker:
        # print("Pre-loaded training data: %d" % train_data.count_loaded_datapoints())
        print("Pre-loaded test data: %d" % test_data.count_loaded_datapoints())
    # Build dataloaders.
    train_data_dl = DataLoader(train_data,
                               batch_size=args.batch_size,
                               shuffle=shuffle,
                               num_workers=12,
                               sampler=train_sampler,
                               collate_fn=mol_collator)
    test_data_dl = DataLoader(test_data,
                              batch_size=args.batch_size,
                              shuffle=shuffle,
                              num_workers=10,
                              sampler=test_sampler,
                              collate_fn=mol_collator)
    # Build the embedding model.
    grover_model = GROVEREmbedding(args)
    # Build the trainer.
    trainer = GROVERTrainer(args=args,
                            embedding_model=grover_model,
                            atom_vocab_size=atom_vocab_size,
                            bond_vocab_size=bond_vocab_size,
                            fg_szie=fg_size,
                            train_dataloader=train_data_dl,
                            test_dataloader=test_data_dl,
                            optimizer_builder=build_optimizer,
                            scheduler_builder=build_lr_scheduler,
                            logger=logger,
                            with_cuda=with_cuda,
                            enable_multi_gpu=args.enable_multi_gpu)
    # Restore the interrupted training: rank 0 reads the checkpoint, then the
    # resume point is broadcast so all workers agree.
    model_dir = os.path.join(args.save_dir, "model")
    resume_from_epoch = 0
    resume_scheduler_step = 0
    if master_worker:
        resume_from_epoch, resume_scheduler_step = trainer.restore(model_dir)
    if args.enable_multi_gpu:
        resume_from_epoch = mgw.broadcast(torch.tensor(resume_from_epoch), root_rank=0, name="resume_from_epoch").item()
        resume_scheduler_step = mgw.broadcast(torch.tensor(resume_scheduler_step),
                                              root_rank=0, name="resume_scheduler_step").item()
    trainer.scheduler.current_step = resume_scheduler_step
    print("Restored epoch: %d Restored scheduler step: %d" % (resume_from_epoch, trainer.scheduler.current_step))
    trainer.broadcast_parameters()
    # Print model details.
    if master_worker:
        # Change order here.
        print(grover_model)
        print("Total parameters: %d" % param_count(trainer.grover))
    # Perform training.
    for epoch in range(resume_from_epoch + 1, args.epochs):
        s_time = time.time()
        # Data pre-loading for this epoch's shard.
        if args.enable_multi_gpu:
            train_sampler.set_epoch(epoch)
            train_data.clean_cache()
            idxs = train_sampler.get_indices()
            # NOTE(review): this loop variable shadows the earlier
            # local_gpu_idx used for GPU pinning.
            for local_gpu_idx in idxs:
                train_data.load_data(local_gpu_idx)
        d_time = time.time() - s_time
        # Perform training and validation.
        s_time = time.time()
        _, train_loss, _ = trainer.train(epoch)
        t_time = time.time() - s_time
        s_time = time.time()
        _, val_loss, detailed_loss_val = trainer.test(epoch)
        val_av_loss, val_bv_loss, val_fg_loss, _, _, _ = detailed_loss_val
        v_time = time.time() - s_time
        # Print epoch summary (losses, lr, timing).
        if master_worker:
            print('Epoch: {:04d}'.format(epoch),
                  'loss_train: {:.6f}'.format(train_loss),
                  'loss_val: {:.6f}'.format(val_loss),
                  'loss_val_av: {:.6f}'.format(val_av_loss),
                  'loss_val_bv: {:.6f}'.format(val_bv_loss),
                  'loss_val_fg: {:.6f}'.format(val_fg_loss),
                  'cur_lr: {:.5f}'.format(trainer.scheduler.get_lr()[0]),
                  't_time: {:.4f}s'.format(t_time),
                  'v_time: {:.4f}s'.format(v_time),
                  'd_time: {:.4f}s'.format(d_time), flush=True)
            if epoch % args.save_interval == 0:
                trainer.save(epoch, model_dir)
        trainer.save_tmp(epoch, model_dir, rank)
    # Only save final version.
    if master_worker:
        trainer.save(args.epochs, model_dir, "")
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/grovertrainer.py | .py | 11,317 | 280 | """
The GROVER trainer.
"""
import os
import time
from logging import Logger
from typing import List, Tuple
from collections.abc import Callable
import torch
from torch.nn import Module
from torch.utils.data import DataLoader
from grover.model.models import GroverTask
from grover.util.multi_gpu_wrapper import MultiGpuWrapper as mgw
class GROVERTrainer:
    def __init__(self,
                 args,
                 embedding_model: Module,
                 atom_vocab_size: int,  # atom vocab size
                 bond_vocab_size: int,
                 fg_szie: int,
                 train_dataloader: DataLoader,
                 test_dataloader: DataLoader,
                 optimizer_builder: Callable,
                 scheduler_builder: Callable,
                 logger: Logger = None,
                 with_cuda: bool = False,
                 enable_multi_gpu: bool = False):
        """
        The init function of GROVERTrainer

        :param args: the input arguments.
        :param embedding_model: the model to generate atom/bond embeddings.
        :param atom_vocab_size: the vocabulary size of atoms.
        :param bond_vocab_size: the vocabulary size of bonds.
        :param fg_szie: the size of semantic motifs (functional groups).
            NOTE(review): misspelling of fg_size, kept for caller compatibility.
        :param train_dataloader: the data loader of train data.
        :param test_dataloader: the data loader of validation data.
        :param optimizer_builder: the function of building the optimizer.
        :param scheduler_builder: the function of building the scheduler.
        :param logger: the logger
        :param with_cuda: enable gpu training.
        :param enable_multi_gpu: enable multi_gpu traning.
        """
        self.args = args
        self.with_cuda = with_cuda
        # Keep the raw embedding model; the pretraining heads wrap it below.
        self.grover = embedding_model
        self.model = GroverTask(args, embedding_model, atom_vocab_size, bond_vocab_size, fg_szie)
        self.loss_func = self.model.get_loss_func(args)
        self.enable_multi_gpu = enable_multi_gpu
        self.atom_vocab_size = atom_vocab_size
        self.bond_vocab_size = bond_vocab_size
        self.debug = logger.debug if logger is not None else print
        # Move the model to GPU before building the optimizer so parameter
        # references held by the optimizer are the CUDA tensors.
        if self.with_cuda:
            # print("Using %d GPUs for training." % (torch.cuda.device_count()))
            self.model = self.model.cuda()
        self.train_data = train_dataloader
        self.test_data = test_dataloader
        self.optimizer = optimizer_builder(self.model, self.args)
        self.scheduler = scheduler_builder(self.optimizer, self.args)
        # Horovod wraps the optimizer for gradient averaging across workers.
        if self.enable_multi_gpu:
            self.optimizer = mgw.DistributedOptimizer(self.optimizer,
                                                      named_parameters=self.model.named_parameters())
        self.args = args
        # Running count of training examples seen so far.
        self.n_iter = 0
def broadcast_parameters(self) -> None:
"""
Broadcast parameters before training.
:return: no return.
"""
if self.enable_multi_gpu:
# broadcast parameters & optimizer state.
mgw.broadcast_parameters(self.model.state_dict(), root_rank=0)
mgw.broadcast_optimizer_state(self.optimizer, root_rank=0)
def train(self, epoch: int) -> List:
"""
The training iteration
:param epoch: the current epoch number.
:return: the loss terms of current epoch.
"""
# return self.mock_iter(epoch, self.train_data, train=True)
return self.iter(epoch, self.train_data, train=True)
def test(self, epoch: int) -> List:
"""
The test/validaiion iteration
:param epoch: the current epoch number.
:return: the loss terms as a list
"""
# return self.mock_iter(epoch, self.test_data, train=False)
return self.iter(epoch, self.test_data, train=False)
def mock_iter(self, epoch: int, data_loader: DataLoader, train: bool = True) -> List:
"""
Perform a mock iteration. For test only.
:param epoch: the current epoch number.
:param data_loader: the data loader.
:param train: True: train model, False: validation model.
:return: the loss terms as a list
"""
for _, _ in enumerate(data_loader):
self.scheduler.step()
cum_loss_sum = 0.0
self.n_iter += self.args.batch_size
return self.n_iter, cum_loss_sum, (0, 0, 0, 0, 0, 0)
def iter(self, epoch, data_loader, train=True) -> List:
"""
Perform a training / validation iteration.
:param epoch: the current epoch number.
:param data_loader: the data loader.
:param train: True: train model, False: validation model.
:return: the loss terms as a list
"""
if train:
self.model.train()
else:
self.model.eval()
loss_sum, iter_count = 0, 0
cum_loss_sum, cum_iter_count = 0, 0
av_loss_sum, bv_loss_sum, fg_loss_sum, av_dist_loss_sum, bv_dist_loss_sum, fg_dist_loss_sum = 0, 0, 0, 0, 0, 0
# loss_func = self.model.get_loss_func(self.args)
for _, item in enumerate(data_loader):
batch_graph = item["graph_input"]
targets = item["targets"]
if next(self.model.parameters()).is_cuda:
targets["av_task"] = targets["av_task"].cuda()
targets["bv_task"] = targets["bv_task"].cuda()
targets["fg_task"] = targets["fg_task"].cuda()
preds = self.model(batch_graph)
# # ad-hoc code, for visualizing a model, comment this block when it is not needed
# import dglt.contrib.grover.vis_model as vis_model
# for task in ['av_task', 'bv_task', 'fg_task']:
# vis_graph = vis_model.make_dot(self.model(batch_graph)[task],
# params=dict(self.model.named_parameters()))
# # vis_graph.view()
# vis_graph.render(f"{self.args.backbone}_model_{task}_vis.png", format="png")
# exit()
loss, av_loss, bv_loss, fg_loss, av_dist_loss, bv_dist_loss, fg_dist_loss = self.loss_func(preds, targets)
loss_sum += loss.item()
iter_count += self.args.batch_size
if train:
cum_loss_sum += loss.item()
# Run model
self.model.zero_grad()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step()
else:
# For eval model, only consider the loss of three task.
cum_loss_sum += av_loss.item()
cum_loss_sum += bv_loss.item()
cum_loss_sum += fg_loss.item()
av_loss_sum += av_loss.item()
bv_loss_sum += bv_loss.item()
fg_loss_sum += fg_loss.item()
av_dist_loss_sum += av_dist_loss.item() if type(av_dist_loss) != float else av_dist_loss
bv_dist_loss_sum += bv_dist_loss.item() if type(bv_dist_loss) != float else bv_dist_loss
fg_dist_loss_sum += fg_dist_loss.item() if type(fg_dist_loss) != float else fg_dist_loss
cum_iter_count += 1
self.n_iter += self.args.batch_size
# Debug only.
# if i % 50 == 0:
# print(f"epoch: {epoch}, batch_id: {i}, av_loss: {av_loss}, bv_loss: {bv_loss}, "
# f"fg_loss: {fg_loss}, av_dist_loss: {av_dist_loss}, bv_dist_loss: {bv_dist_loss}, "
# f"fg_dist_loss: {fg_dist_loss}")
cum_loss_sum /= cum_iter_count
av_loss_sum /= cum_iter_count
bv_loss_sum /= cum_iter_count
fg_loss_sum /= cum_iter_count
av_dist_loss_sum /= cum_iter_count
bv_dist_loss_sum /= cum_iter_count
fg_dist_loss_sum /= cum_iter_count
return self.n_iter, cum_loss_sum, (av_loss_sum, bv_loss_sum, fg_loss_sum, av_dist_loss_sum,
bv_dist_loss_sum, fg_dist_loss_sum)
def save(self, epoch, file_path, name=None) -> str:
"""
Save the intermediate models during training.
:param epoch: the epoch number.
:param file_path: the file_path to save the model.
:return: the output path.
"""
# add specific time in model fine name, in order to distinguish different saved models
now = time.localtime()
if name is None:
name = "_%04d_%02d_%02d_%02d_%02d_%02d" % (
now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
output_path = file_path + name + ".ep%d" % epoch
scaler = None
features_scaler = None
state = {
'args': self.args,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler_step': self.scheduler.current_step,
"epoch": epoch,
'data_scaler': {
'means': scaler.means,
'stds': scaler.stds
} if scaler is not None else None,
'features_scaler': {
'means': features_scaler.means,
'stds': features_scaler.stds
} if features_scaler is not None else None
}
torch.save(state, output_path)
# Is this necessary?
# if self.with_cuda:
# self.model = self.model.cuda()
print("EP:%d Model Saved on:" % epoch, output_path)
return output_path
def save_tmp(self, epoch, file_path, rank=0):
"""
Save the models for auto-restore during training.
The model are stored in file_path/tmp folder and will replaced on each epoch.
:param epoch: the epoch number.
:param file_path: the file_path to store the model.
:param rank: the current rank (decrypted).
:return:
"""
store_path = os.path.join(file_path, "tmp")
if not os.path.exists(store_path):
os.makedirs(store_path, exist_ok=True)
store_path = os.path.join(store_path, "model.%d" % rank)
state = {
'args': self.args,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler_step': self.scheduler.current_step,
"epoch": epoch
}
torch.save(state, store_path)
def restore(self, file_path, rank=0) -> Tuple[int, int]:
"""
Restore the training state saved by save_tmp.
:param file_path: the file_path to store the model.
:param rank: the current rank (decrypted).
:return: the restored epoch number and the scheduler_step in scheduler.
"""
cpt_path = os.path.join(file_path, "tmp", "model.%d" % rank)
if not os.path.exists(cpt_path):
print("No checkpoint found %d")
return 0, 0
cpt = torch.load(cpt_path)
self.model.load_state_dict(cpt["state_dict"])
self.optimizer.load_state_dict(cpt["optimizer"])
epoch = cpt["epoch"]
scheduler_step = cpt["scheduler_step"]
self.scheduler.current_step = scheduler_step
print("Restore checkpoint, current epoch: %d" % (epoch))
return epoch, scheduler_step
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/predict.py | .py | 10,630 | 317 | """
The predict function using the finetuned model to make the prediction. .
"""
from argparse import Namespace
from typing import List
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from grover.data import MolCollator
from grover.data import MoleculeDataset
from grover.data import StandardScaler
from grover.util.utils import get_data, get_data_from_smiles, create_logger, load_args, get_task_names, tqdm, \
load_checkpoint, load_scalars
def predict(model: nn.Module,
            data: MoleculeDataset,
            args: Namespace,
            batch_size: int,
            loss_func,
            logger,
            shared_dict,
            scaler: StandardScaler = None
            ) -> List[List[float]]:
    """
    Makes predictions on a dataset with a single model.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param args: Arguments; ``args.fingerprint`` switches to returning raw
        embeddings instead of predictions.
    :param batch_size: Batch size.
    :param loss_func: optional loss function; when given, the masked average
        loss over the dataset is computed alongside the predictions.
    :param logger: Logger (currently unused).
    :param shared_dict: dict shared across data-loader workers by MolCollator.
    :param scaler: A StandardScaler object fit on the training targets.
    :return: A tuple (preds, loss_avg). ``preds`` is a list of lists, outer
        list over examples and inner list over tasks.
    """
    model.eval()
    args.bond_drop_rate = 0  # never drop bonds at inference time
    preds = []

    loss_sum, iter_count = 0, 0

    mol_collator = MolCollator(args=args, shared_dict=shared_dict)

    num_workers = 4
    mol_loader = DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=num_workers,
                            collate_fn=mol_collator)
    for _, item in enumerate(mol_loader):
        _, batch, features_batch, mask, targets = item
        class_weights = torch.ones(targets.shape)
        if next(model.parameters()).is_cuda:
            targets = targets.cuda()
            mask = mask.cuda()
            class_weights = class_weights.cuda()
        with torch.no_grad():
            batch_preds = model(batch, features_batch)
            iter_count += 1

        if args.fingerprint:
            # Fingerprint mode: collect raw embeddings, skip loss bookkeeping.
            preds.extend(batch_preds.data.cpu().numpy())
            continue

        if loss_func is not None:
            # Masked, class-weighted mean loss over the batch.
            loss = loss_func(batch_preds, targets) * class_weights * mask
            loss = loss.sum() / mask.sum()
            loss_sum += loss.item()

        # Collect vectors
        batch_preds = batch_preds.data.cpu().numpy().tolist()
        if scaler is not None:
            batch_preds = scaler.inverse_transform(batch_preds)
        preds.extend(batch_preds)

    # BUGFIX: avoid ZeroDivisionError when the data loader yields no batches.
    loss_avg = loss_sum / iter_count if iter_count else 0.0
    return preds, loss_avg
def make_predictions(args: Namespace, newest_train_args=None, smiles: List[str] = None):
    """
    Makes predictions. If smiles is provided, makes predictions on smiles.
    Otherwise makes predictions on args.test_data.

    :param args: Arguments.
    :param newest_train_args: optional newest training arguments; attributes
        missing from ``args`` are merged in (existing values win).
    :param smiles: Smiles to make predictions on.
    :return: the ensemble-averaged predictions and the input SMILES list; when
        ``args.fingerprint`` is set, the raw outputs of the first checkpoint
        are returned instead.
    """
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
    print('Loading training args')
    path = args.checkpoint_paths[0]
    scaler, features_scaler = load_scalars(path)
    train_args = load_args(path)

    # Update args with training arguments saved in checkpoint.
    # Checkpoint values never override values already present in args.
    for key, value in vars(train_args).items():
        if not hasattr(args, key):
            setattr(args, key, value)

    # update args with newest training args
    if newest_train_args is not None:
        for key, value in vars(newest_train_args).items():
            if not hasattr(args, key):
                setattr(args, key, value)

    # deal with multiprocess problem
    args.debug = True
    logger = create_logger('predict', quiet=False)
    print('Loading data')
    args.task_names = get_task_names(args.data_path)
    if smiles is not None:
        test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=False)
    else:
        test_data = get_data(path=args.data_path, args=args,
                             use_compound_names=args.use_compound_names, skip_invalid_smiles=False)

    args.num_tasks = test_data.num_tasks()
    args.features_size = test_data.features_size()

    print('Validating SMILES')
    # NOTE(review): every index is kept here, so despite the message above no
    # molecule is actually filtered out (skip_invalid_smiles=False too).
    valid_indices = [i for i in range(len(test_data))]
    full_data = test_data
    # test_data = MoleculeDataset([test_data[i] for i in valid_indices])
    test_data_list = []
    for i in valid_indices:
        test_data_list.append(test_data[i])
    test_data = MoleculeDataset(test_data_list)

    # Edge case if empty list of smiles is provided
    if len(test_data) == 0:
        return [None] * len(full_data)

    print(f'Test size = {len(test_data):,}')

    # Normalize features
    if hasattr(train_args, 'features_scaling'):
        if train_args.features_scaling:
            test_data.normalize_features(features_scaler)

    # Predict with each model individually and sum predictions
    if hasattr(args, 'num_tasks'):
        sum_preds = np.zeros((len(test_data), args.num_tasks))

    print(f'Predicting...')
    shared_dict = {}
    # loss_func = torch.nn.BCEWithLogitsLoss()
    count = 0
    for checkpoint_path in tqdm(args.checkpoint_paths, total=len(args.checkpoint_paths)):
        # Load model
        model = load_checkpoint(checkpoint_path, cuda=args.cuda, current_args=args, logger=logger)
        model_preds, _ = predict(
            model=model,
            data=test_data,
            batch_size=args.batch_size,
            scaler=scaler,
            shared_dict=shared_dict,
            args=args,
            logger=logger,
            loss_func=None
        )
        if args.fingerprint:
            # Fingerprint mode: return the raw output of the first model only.
            return model_preds
        sum_preds += np.array(model_preds, dtype=float)
        count += 1

    # Ensemble predictions: average over all checkpoints.
    avg_preds = sum_preds / len(args.checkpoint_paths)

    # Save predictions
    assert len(test_data) == len(avg_preds)

    # Put Nones for invalid smiles
    args.valid_indices = valid_indices
    avg_preds = np.array(avg_preds)
    test_smiles = full_data.smiles()
    return avg_preds, test_smiles
def write_prediction(avg_preds, test_smiles, args):
    """
    Persist model predictions as a CSV file at ``args.output_path``.

    :param avg_preds: predicted values, one row per valid input molecule.
    :param test_smiles: all input SMILES strings (becomes the CSV row index).
    :param args: Arguments; uses dataset_type, valid_indices, task_names and
        output_path.
    """
    if args.dataset_type == 'multiclass':
        # Collapse per-class scores to the index of the winning class.
        avg_preds = np.argmax(avg_preds, -1)

    # Start from placeholder rows, then fill in the rows that correspond to
    # valid molecules; the rest stay [None].
    full_preds = [[None]] * len(test_smiles)
    for position, smiles_index in enumerate(args.valid_indices):
        full_preds[smiles_index] = avg_preds[position]

    frame = pd.DataFrame(data=full_preds, index=test_smiles, columns=args.task_names)
    frame.to_csv(args.output_path)
    print(f'Saving predictions to {args.output_path}')
def evaluate_predictions(preds: List[List[float]],
                         targets: List[List[float]],
                         num_tasks: int,
                         metric_func,
                         dataset_type: str,
                         logger = None) -> List[float]:
    """
    Score predictions against targets with ``metric_func``, ignoring entries
    whose target is ``None``.

    :param preds: A list of lists of shape (data_size, num_tasks) with model predictions.
    :param targets: A list of lists of shape (data_size, num_tasks) with targets.
    :param num_tasks: Number of tasks.
    :param metric_func: Metric function which takes in a list of targets and a list of predictions.
    :param dataset_type: Dataset type.
    :param logger: Logger (unused).
    :return: A list with the score for each task based on `metric_func`.
    """
    if dataset_type == 'multiclass':
        # Multiclass is scored as a single task on the argmax class.
        return [metric_func(np.argmax(preds, -1), [row[0] for row in targets])]

    if not preds:
        return [float('nan')] * num_tasks

    # Transpose into per-task lists of shape (num_tasks, data_size),
    # dropping molecules whose target is missing for that task.
    task_preds = [[] for _ in range(num_tasks)]
    task_targets = [[] for _ in range(num_tasks)]
    for task in range(num_tasks):
        for mol in range(len(preds)):
            if targets[mol][task] is not None:
                task_preds[task].append(preds[mol][task])
                task_targets[task].append(targets[mol][task])

    scores = []
    for task in range(num_tasks):
        if dataset_type == 'classification':
            # Degenerate columns would crash AUC-style metrics, so report NaN.
            one_class_targets = all(t == 0 for t in task_targets[task]) or \
                all(t == 1 for t in task_targets[task])
            one_class_preds = all(p == 0 for p in task_preds[task]) or \
                all(p == 1 for p in task_preds[task])
            if one_class_targets or one_class_preds:
                scores.append(float('nan'))
                continue

        if not task_targets[task]:
            # No valid targets at all for this task: skip it entirely.
            continue

        scores.append(metric_func(task_targets[task], task_preds[task]))

    return scores
def evaluate(model: nn.Module,
             data: MoleculeDataset,
             num_tasks: int,
             metric_func,
             loss_func,
             batch_size: int,
             dataset_type: str,
             args: Namespace,
             shared_dict,
             scaler: StandardScaler = None,
             logger = None) -> List[float]:
    """
    Evaluate a model on a dataset: run prediction, then score the predictions
    against the dataset targets.

    :param model: A model.
    :param data: A MoleculeDataset.
    :param num_tasks: Number of tasks.
    :param metric_func: Metric function which takes in a list of targets and a list of predictions.
    :param loss_func: loss function forwarded to ``predict``.
    :param batch_size: Batch size.
    :param dataset_type: Dataset type.
    :param args: Arguments forwarded to ``predict``.
    :param shared_dict: dict shared across data-loader workers.
    :param scaler: A StandardScaler object fit on the training targets.
    :param logger: Logger.
    :return: A tuple of (per-task scores from ``metric_func``, average loss).
    """
    predictions, loss_avg = predict(model=model,
                                    data=data,
                                    loss_func=loss_func,
                                    batch_size=batch_size,
                                    scaler=scaler,
                                    shared_dict=shared_dict,
                                    logger=logger,
                                    args=args)

    true_targets = data.targets()
    if scaler is not None:
        # Compare in the original (unscaled) target space.
        true_targets = scaler.inverse_transform(true_targets)

    scores = evaluate_predictions(preds=predictions,
                                  targets=true_targets,
                                  num_tasks=num_tasks,
                                  metric_func=metric_func,
                                  dataset_type=dataset_type,
                                  logger=logger)
    return scores, loss_avg
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/__init__.py | .py | 0 | 0 | null | Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/task/cross_validate.py | .py | 2,514 | 70 | """
The cross validation function for finetuning.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/train/cross_validate.py
"""
import os
import time
from argparse import Namespace
from logging import Logger
from typing import Tuple
import numpy as np
from grover.util.utils import get_task_names
from grover.util.utils import makedirs
from task.run_evaluation import run_evaluation
from task.train import run_training
def cross_validate(args: Namespace, logger: Logger = None) -> Tuple[float, float]:
    """
    k-fold cross validation.

    Each fold re-runs training (or evaluation, depending on args.parser_name)
    with a different random seed and its own save sub-directory.

    :param args: arguments; uses num_folds, seed, save_dir, data_path,
        parser_name, metric, split_type and show_individual_scores.
    :param logger: optional logger; falls back to print.
    :return: A tuple of mean_score and std_score.
    """
    info = logger.info if logger is not None else print

    # Initialize relevant variables
    init_seed = args.seed
    save_dir = args.save_dir
    task_names = get_task_names(args.data_path)

    # Run training with different random seeds for each fold
    all_scores = []
    time_start = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
    for fold_num in range(args.num_folds):
        info(f'Fold {fold_num}')
        # Each fold gets its own seed and output directory.
        args.seed = init_seed + fold_num
        args.save_dir = os.path.join(save_dir, f'fold_{fold_num}')
        makedirs(args.save_dir)
        if args.parser_name == "finetune":
            model_scores = run_training(args, time_start, logger)
        else:
            model_scores = run_evaluation(args, logger)
        all_scores.append(model_scores)
    # Shape: (num_folds, num_tasks).
    all_scores = np.array(all_scores)

    # Report scores for each fold
    info(f'{args.num_folds}-fold cross validation')

    for fold_num, scores in enumerate(all_scores):
        info(f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}')

        if args.show_individual_scores:
            for task_name, score in zip(task_names, scores):
                info(f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}')

    # Report scores across models
    avg_scores = np.nanmean(all_scores, axis=1)  # average score for each model across tasks
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'overall_{args.split_type}_test_{args.metric}={mean_score:.6f}')
    info(f'std={std_score:.6f}')

    if args.show_individual_scores:
        for task_num, task_name in enumerate(task_names):
            info(f'Overall test {task_name} {args.metric} = '
                 f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}')

    return mean_score, std_score
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/scripts/build_vocab.py | .py | 1,704 | 44 | """
The vocabulary building scripts.
"""
import os
import sys
sys.path.append(f"/work/yufeng/2022/enzyme_specificity/src/other_softwares/grover_software")
from grover.data.torchvocab import MolVocab
def build():
    """
    Build atom and bond vocabularies from a SMILES CSV and pickle them.

    Writes ``[<dataset_name>_]atom_vocab.pkl`` and
    ``[<dataset_name>_]bond_vocab.pkl`` into --vocab_save_folder.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default="../../dataset/grover_new_dataset/druglike_merged_refine2.csv", type=str)
    parser.add_argument('--vocab_save_folder', default="../../dataset/grover_new_dataset", type=str)
    parser.add_argument('--dataset_name', type=str, default=None,
                        help="Will be the first part of the vocab file name. If it is None,"
                             "the vocab files will be: atom_vocab.pkl and bond_vocab.pkl")
    parser.add_argument('--vocab_max_size', type=int, default=None)
    parser.add_argument('--vocab_min_freq', type=int, default=1)
    args = parser.parse_args()

    # fin = open(args.data_path, 'r')
    # lines = fin.readlines()

    for vocab_type in ['atom', 'bond']:
        vocab_file = f"{vocab_type}_vocab.pkl"
        if args.dataset_name is not None:
            vocab_file = args.dataset_name + '_' + vocab_file
        vocab_save_path = os.path.join(args.vocab_save_folder, vocab_file)
        os.makedirs(os.path.dirname(vocab_save_path), exist_ok=True)
        # NOTE(review): num_workers=100 is hard-coded — verify it suits the
        # machine this runs on before launching.
        vocab = MolVocab(file_path=args.data_path,
                         max_size=args.vocab_max_size,
                         min_freq=args.vocab_min_freq,
                         num_workers=100,
                         vocab_type=vocab_type)
        print(f"{vocab_type} vocab size", len(vocab))
        vocab.save_vocab(vocab_save_path)


if __name__ == '__main__':
    build()
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/scripts/save_features.py | .py | 4,808 | 128 | """
Computes and saves molecular features for a dataset.
"""
import os
import shutil
import sys
from argparse import ArgumentParser, Namespace
from multiprocessing import Pool
from typing import List, Tuple
from tqdm import tqdm
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from grover.util.utils import get_data, makedirs, load_features, save_features
from grover.data.molfeaturegenerator import get_available_features_generators, \
get_features_generator
from grover.data.task_labels import rdkit_functional_group_label_features_generator
def load_temp(temp_dir: str) -> Tuple[List[List[float]], int]:
    """
    Loads all features saved as .npz files in load_dir.

    Assumes temporary files are named in order 0.npz, 1.npz, ...

    :param temp_dir: Directory in which temporary .npz files containing features are stored.
    :return: A tuple with a list of molecule features, where each molecule's features is a list of floats,
    and the number of temporary files.
    """
    features = []
    index = 0
    # Consume consecutively numbered chunks until the sequence breaks.
    while True:
        chunk_path = os.path.join(temp_dir, f'{index}.npz')
        if not os.path.exists(chunk_path):
            break
        features.extend(load_features(chunk_path))
        index += 1
    return features, index
def generate_and_save_features(args: Namespace):
    """
    Computes and saves features for a dataset of molecules as a 2D array in a .npz file.

    Features are checkpointed to ``<save_path>_temp/<i>.npz`` every
    ``save_frequency`` molecules so an interrupted run can resume.

    :param args: Arguments.
    """
    # Create directory for save_path
    makedirs(args.save_path, isfile=True)

    # Get data and features function
    data = get_data(path=args.data_path, max_data_size=None)
    features_generator = get_features_generator(args.features_generator)
    temp_save_dir = args.save_path + '_temp'

    # Load partially complete data
    if args.restart:
        # Fresh start: discard previous output and checkpoints.
        if os.path.exists(args.save_path):
            os.remove(args.save_path)
        if os.path.exists(temp_save_dir):
            shutil.rmtree(temp_save_dir)
    else:
        if os.path.exists(args.save_path):
            raise ValueError(f'"{args.save_path}" already exists and args.restart is False.')

        if os.path.exists(temp_save_dir):
            # Resume: pick up the features already checkpointed.
            features, temp_num = load_temp(temp_save_dir)

    if not os.path.exists(temp_save_dir):
        makedirs(temp_save_dir)
        features, temp_num = [], 0

    # Build features map function
    data = data[len(features):]  # restrict to data for which features have not been computed yet
    mols = (d.smiles for d in data)

    if args.sequential:
        features_map = map(features_generator, mols)
    else:
        # NOTE(review): the 30-process pool is never closed or joined —
        # consider Pool.close()/join() (or a context manager) after the loop.
        features_map = Pool(30).imap(features_generator, mols)

    # Get features
    temp_features = []
    for i, feats in tqdm(enumerate(features_map), total=len(data)):
        temp_features.append(feats)

        # Save temporary features every save_frequency
        if (i > 0 and (i + 1) % args.save_frequency == 0) or i == len(data) - 1:
            save_features(os.path.join(temp_save_dir, f'{temp_num}.npz'), temp_features)
            features.extend(temp_features)
            temp_features = []
            temp_num += 1

    try:
        # Save all features
        save_features(args.save_path, features)

        # Remove temporary features
        shutil.rmtree(temp_save_dir)
    except OverflowError:
        print('Features array is too large to save as a single file. Instead keeping features as a directory of files.')
if __name__ == '__main__':
    # CLI: compute features for --data_path with --features_generator and
    # store them at --save_path (defaults to the data path with .npz suffix).
    parser = ArgumentParser()
    parser.add_argument('--data_path', type=str, required=True,
                        help='Path to data CSV')
    parser.add_argument('--features_generator', type=str, required=True,
                        choices=get_available_features_generators(),
                        help='Type of features to generate')
    parser.add_argument('--save_path', type=str, default=None,
                        help='Path to .npz file where features will be saved as a compressed numpy archive')
    parser.add_argument('--save_frequency', type=int, default=10000,
                        help='Frequency with which to save the features')
    parser.add_argument('--restart', action='store_true', default=False,
                        help='Whether to not load partially complete featurization and instead start from scratch')
    parser.add_argument('--max_data_size', type=int,
                        help='Maximum number of data points to load')
    parser.add_argument('--sequential', action='store_true', default=False,
                        help='Whether to task sequentially rather than in parallel')
    args = parser.parse_args()

    if args.save_path is None:
        args.save_path = args.data_path.split('csv')[0] + 'npz'

    generate_and_save_features(args)
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/scripts/__init__.py | .py | 0 | 0 | null | Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/scripts/split_data.py | .py | 2,606 | 88 | """
The data splitting script for pretraining.
"""
import os
from argparse import ArgumentParser
import csv
import shutil
import numpy as np
import grover.util.utils as fea_utils
# Command-line interface: input CSV of molecules, matching feature matrix,
# shard size and output directory for the split dataset.
parser = ArgumentParser()
parser.add_argument("--data_path", default="../drug_data/grover_data/delaneyfreesolvlipo.csv")
parser.add_argument("--features_path", default="../drug_data/grover_data/delaneyfreesolvlipo_molbert.npz")
parser.add_argument("--sample_per_file", type=int, default=1000)
parser.add_argument("--output_path", default="../drug_data/grover_data/delaneyfreesolvlipo")
def load_smiles(data_path):
    """
    Read a CSV file of molecules.

    :param data_path: path to the CSV file.
    :return: a tuple (rows, header) where rows is a list of row lists and
        header is the first line of the file.
    """
    with open(data_path) as handle:
        rows = csv.reader(handle)
        header = next(rows)
        body = list(rows)
    return body, header
def load_features(data_path):
    """
    Load a feature matrix from disk (delegates to grover's feature utilities).

    :param data_path: path to the feature file (.npz).
    :return: the loaded feature array.
    """
    fea = fea_utils.load_features(data_path)
    return fea
def save_smiles(data_path, index, data, header):
    """
    Write one shard of molecule rows to ``<data_path>/<index>.csv``.

    :param data_path: directory receiving the shard.
    :param index: shard number, used as the file name.
    :param data: list of CSV rows (each a list of fields).
    :param header: header row written first.
    """
    fn = os.path.join(data_path, str(index) + ".csv")
    # BUGFIX: newline='' lets the csv module control line endings; without it
    # csv.writer inserts blank rows on Windows.
    with open(fn, "w", newline='') as f:
        fw = csv.writer(f)
        fw.writerow(header)
        fw.writerows(data)
def save_features(data_path, index, data):
    """
    Write one shard of features to ``<data_path>/<index>.npz``
    (compressed archive, stored under the key ``features``).

    :param data_path: directory receiving the shard.
    :param index: shard number, used as the file name.
    :param data: feature array for this shard.
    """
    target = os.path.join(data_path, str(index) + ".npz")
    np.savez_compressed(target, features=data)
def run():
    """
    Split a dataset (SMILES CSV + feature matrix) into shuffled shards.

    Each shard holds at most ``--sample_per_file`` molecules; graph rows are
    written as CSVs under ``<output>/graph`` and features as .npz files under
    ``<output>/feature``, plus a summary.txt describing the split.
    """
    args = parser.parse_args()
    res, header = load_smiles(data_path=args.data_path)
    fea = load_features(data_path=args.features_path)
    # SMILES rows and feature rows must line up one-to-one.
    assert len(res) == fea.shape[0]

    n_graphs = len(res)
    # One shared permutation keeps graphs and features aligned across shards.
    perm = np.random.permutation(n_graphs)
    # BUGFIX: ceiling division. The previous int(n / per_file + 1) produced an
    # extra, empty shard whenever n was an exact multiple of per_file.
    nfold = (n_graphs + args.sample_per_file - 1) // args.sample_per_file
    print("Number of files: %d" % nfold)

    if os.path.exists(args.output_path):
        shutil.rmtree(args.output_path)
    os.makedirs(args.output_path, exist_ok=True)
    graph_path = os.path.join(args.output_path, "graph")
    fea_path = os.path.join(args.output_path, "feature")
    os.makedirs(graph_path, exist_ok=True)
    os.makedirs(fea_path, exist_ok=True)

    for i in range(nfold):
        sidx = i * args.sample_per_file
        eidx = min((i + 1) * args.sample_per_file, n_graphs)
        indexes = perm[sidx:eidx]
        sres = [res[j] for j in indexes]
        sfea = fea[indexes]
        save_smiles(graph_path, i, sres, header)
        save_features(fea_path, i, sfea)

    # Write the split metadata; "with" guarantees the file is closed.
    summary_path = os.path.join(args.output_path, "summary.txt")
    with open(summary_path, 'w') as summary_fout:
        summary_fout.write("n_files:%d\n" % nfold)
        summary_fout.write("n_samples:%d\n" % n_graphs)
        summary_fout.write("sample_per_file:%d\n" % args.sample_per_file)


if __name__ == "__main__":
    run()
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/model/models.py | .py | 22,143 | 509 | """
The GROVER models for pretraining, finetuning and fingerprint generating.
"""
from argparse import Namespace
from typing import List, Dict, Callable
import numpy as np
import torch
from torch import nn as nn
from grover.data import get_atom_fdim, get_bond_fdim
from grover.model.layers import Readout, GTransEncoder
from grover.util.nn_utils import get_activation_function
class GROVEREmbedding(nn.Module):
    """
    The GROVER Embedding class. It contains the GTransEncoder.
    This GTransEncoder can be replaced by any validate encoders.
    """

    def __init__(self, args: Namespace):
        """
        Initialize the GROVEREmbedding class.

        :param args: arguments; uses embedding_output_type, hidden_size,
            dropout, activation, num_mt_block, num_attn_head, bias, cuda
            and (optionally) backbone.
        """
        super(GROVEREmbedding, self).__init__()
        self.embedding_output_type = args.embedding_output_type
        edge_dim = get_bond_fdim() + get_atom_fdim()
        node_dim = get_atom_fdim()
        if not hasattr(args, "backbone"):
            print("No backbone specified in args, use gtrans backbone.")
            args.backbone = "gtrans"
        if args.backbone == "gtrans" or args.backbone == "dualtrans":
            # dualtrans is the old name.
            if not hasattr(args, "dropout"):
                args.dropout = 0
            self.encoders = GTransEncoder(args,
                                          hidden_size=args.hidden_size,
                                          edge_fdim=edge_dim,
                                          node_fdim=node_dim,
                                          dropout=args.dropout,
                                          activation=args.activation,
                                          num_mt_block=args.num_mt_block,
                                          num_attn_head=args.num_attn_head,
                                          atom_emb_output=self.embedding_output_type,
                                          bias=args.bias,
                                          cuda=args.cuda)

    def forward(self, graph_batch: List) -> Dict:
        """
        The forward function takes graph_batch as input and output a dict. The content of the dict is decided by
        self.embedding_output_type.

        :param graph_batch: the input graph batch generated by MolCollator.
        :return: a dict containing the embedding results (absent branches are None).
        :raises ValueError: if embedding_output_type is not one of
            'atom', 'bond' or 'both'.
        """
        output = self.encoders(graph_batch)
        if self.embedding_output_type == 'atom':
            return {"atom_from_atom": output[0], "atom_from_bond": output[1],
                    "bond_from_atom": None, "bond_from_bond": None}  # atom_from_atom, atom_from_bond
        elif self.embedding_output_type == 'bond':
            return {"atom_from_atom": None, "atom_from_bond": None,
                    "bond_from_atom": output[0], "bond_from_bond": output[1]}  # bond_from_atom, bond_from_bond
        elif self.embedding_output_type == "both":
            return {"atom_from_atom": output[0][0], "bond_from_atom": output[0][1],
                    "atom_from_bond": output[1][0], "bond_from_bond": output[1][1]}
        # BUGFIX: fail loudly instead of silently returning None for an
        # unsupported embedding_output_type.
        raise ValueError(f"Unsupported embedding_output_type: "
                         f"{self.embedding_output_type!r}")
class AtomVocabPrediction(nn.Module):
    """
    Head for the atom-wise vocabulary (context) prediction task: maps atom
    embeddings to log-probabilities over the atom vocabulary.
    """

    def __init__(self, args, vocab_size, hidden_size=None):
        """
        :param args: the argument; args.hidden_size is used when hidden_size
            is not supplied.
        :param vocab_size: the size of atom vocabulary.
        :param hidden_size: optional override for the input feature dimension.
        """
        super(AtomVocabPrediction, self).__init__()
        hidden_size = hidden_size or args.hidden_size
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, embeddings):
        """
        Predict the vocabulary log-probabilities for each atom.

        ``None`` input is passed through unchanged (branch disabled).

        :param embeddings: the atom embeddings, num_atom X fea_dim, or None.
        :return: the prediction for each atom, num_atom X vocab_size, or None.
        """
        if embeddings is None:
            return None
        return self.logsoftmax(self.linear(embeddings))
class BondVocabPrediction(nn.Module):
    """
    The bond-wise vocabulary prediction task. The bond vocabulary is constructed by the context.
    """

    def __init__(self, args, vocab_size, hidden_size=None):
        """
        Might need to use different architecture for bond vocab prediction.

        :param args: arguments; args.hidden_size is used when hidden_size is
            not supplied.
        :param vocab_size: size of bond vocab.
        :param hidden_size: optional override for the input feature dimension.
        """
        super(BondVocabPrediction, self).__init__()
        if not hidden_size:
            hidden_size = args.hidden_size
        self.linear = nn.Linear(hidden_size, vocab_size)

        # ad-hoc here
        # If TWO_FC_4_BOND_VOCAB, we will use two distinct fc layers to deal
        # with the bond and the reverse bond respectively.
        self.TWO_FC_4_BOND_VOCAB = True
        if self.TWO_FC_4_BOND_VOCAB:
            self.linear_rev = nn.Linear(hidden_size, vocab_size)

        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, embeddings):
        """
        If embeddings is None: do not go through forward pass.

        Each undirected bond is represented by two directed copies; the two
        copies are combined into a single vocabulary prediction.

        :param embeddings: the bond embeddings, num_bond X fea_dim, or None.
        :return: one prediction per undirected bond
            (roughly num_bond / 2 X vocab_size), or None.
        """
        if embeddings is None:
            return None

        nm_bonds = embeddings.shape[0]  # must be an odd number
        # The bond and rev bond have odd and even ids respectively. See definition in molgraph.
        # NOTE(review): index 0 appears in both id lists — presumably the
        # padding bond from the batched molgraph; confirm against molgraph.
        ids1 = [0] + list(range(1, nm_bonds, 2))
        ids2 = list(range(0, nm_bonds, 2))
        if self.TWO_FC_4_BOND_VOCAB:
            # Separate projections for the two directed copies of each bond.
            logits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])
        else:
            logits = self.linear(embeddings[ids1] + embeddings[ids2])
        return self.logsoftmax(logits)
class FunctionalGroupPrediction(nn.Module):
    """
    Graph-level prediction head for the semantic-motif (functional group) task.
    """

    def __init__(self, args, fg_size):
        """
        :param args: The arguments; args.hidden_size sets the layer width.
        :param fg_size: The size of semantic motifs.
        """
        super(FunctionalGroupPrediction, self).__init__()
        in_features = args.hidden_size
        # A simple mean readout retains maximal information from the encoder.
        self.readout = Readout(rtype="mean", hidden_size=args.hidden_size)
        # One logit head per embedding branch; inputs with fewer branches are
        # fine since missing branches are skipped in forward. Only logits are
        # produced here because the loss is BCEWithLogitsLoss.
        self.linear_atom_from_atom = nn.Linear(in_features, fg_size)
        self.linear_atom_from_bond = nn.Linear(in_features, fg_size)
        self.linear_bond_from_atom = nn.Linear(in_features, fg_size)
        self.linear_bond_from_bond = nn.Linear(in_features, fg_size)

    def forward(self, embeddings: Dict, ascope: List, bscope: List) -> Dict:
        """
        Produce motif logits for every available embedding branch.

        :param embeddings: output dict of GROVEREmbedding; absent branches are None.
        :param ascope: The scope for atoms. Please refer BatchMolGraph for more details.
        :param bscope: The scope for bonds. Please refer BatchMolGraph for more details.
        :return: a dict mapping branch name to predicted logits (or None).
        """
        # Dispatch table: each branch pairs its linear head with the scope
        # (atom or bond) used to pool embeddings per molecule.
        branches = {
            "atom_from_atom": (self.linear_atom_from_atom, ascope),
            "atom_from_bond": (self.linear_atom_from_bond, ascope),
            "bond_from_atom": (self.linear_bond_from_atom, bscope),
            "bond_from_bond": (self.linear_bond_from_bond, bscope),
        }
        predictions = {}
        for name, (head, scope) in branches.items():
            branch_embedding = embeddings[name]
            if branch_embedding is None:
                predictions[name] = None
            else:
                predictions[name] = head(self.readout(branch_embedding, scope))
        return predictions
class GroverTask(nn.Module):
    """
    The pretrain module: wraps the GROVER encoder together with the three
    self-supervised heads — atom vocabulary (av), bond vocabulary (bv) and
    functional group (fg) prediction — each fed from both embedding views.
    """

    def __init__(self, args, grover, atom_vocab_size, bond_vocab_size, fg_size):
        # :param args: the arguments; must provide embedding_output_type.
        # :param grover: the (already constructed) GROVER embedding backbone.
        # :param atom_vocab_size: size of the atom contextual vocabulary.
        # :param bond_vocab_size: size of the bond contextual vocabulary.
        # :param fg_size: number of functional-group (semantic motif) labels.
        super(GroverTask, self).__init__()
        self.grover = grover
        # Each vocabulary task gets two heads: one fed by the atom-view
        # embeddings and one fed by the bond-view embeddings.
        self.av_task_atom = AtomVocabPrediction(args, atom_vocab_size)
        self.av_task_bond = AtomVocabPrediction(args, atom_vocab_size)
        self.bv_task_atom = BondVocabPrediction(args, bond_vocab_size)
        self.bv_task_bond = BondVocabPrediction(args, bond_vocab_size)
        self.fg_task_all = FunctionalGroupPrediction(args, fg_size)
        self.embedding_output_type = args.embedding_output_type

    @staticmethod
    def get_loss_func(args: Namespace) -> Callable:
        """
        The loss function generator.
        :param args: the arguments (provides dist_coff).
        :return: the loss function for GroverTask.
        """
        def loss_func(preds, targets, dist_coff=args.dist_coff):
            """
            The loss function for GroverTask.
            :param preds: the predictions (dict with "av_task", "bv_task", "fg_task").
            :param targets: the targets (same keys).
            :param dist_coff: the disagreement coefficient for the distances
                between the atom-view and bond-view branches.
            :return: (overall, av, bv, fg, av_dist, bv_dist, fg_dist) losses.
            """
            # NLL is shared by the atom- and bond-vocabulary tasks; label 0
            # (padding) is ignored.
            av_task_loss = nn.NLLLoss(ignore_index=0, reduction="mean")  # same for av and bv
            fg_task_loss = nn.BCEWithLogitsLoss(reduction="mean")
            # av_task_dist_loss = nn.KLDivLoss(reduction="mean")
            av_task_dist_loss = nn.MSELoss(reduction="mean")
            fg_task_dist_loss = nn.MSELoss(reduction="mean")
            sigmoid = nn.Sigmoid()

            # Default every partial loss to 0.0 so branches whose predictions
            # are None simply drop out of the sums below.
            av_atom_loss, av_bond_loss, av_dist_loss = 0.0, 0.0, 0.0
            fg_atom_from_atom_loss, fg_atom_from_bond_loss, fg_atom_dist_loss = 0.0, 0.0, 0.0
            bv_atom_loss, bv_bond_loss, bv_dist_loss = 0.0, 0.0, 0.0
            fg_bond_from_atom_loss, fg_bond_from_bond_loss, fg_bond_dist_loss = 0.0, 0.0, 0.0

            # Per-branch task losses (only for branches that produced output).
            if preds["av_task"][0] is not None:
                av_atom_loss = av_task_loss(preds['av_task'][0], targets["av_task"])
                fg_atom_from_atom_loss = fg_task_loss(preds["fg_task"]["atom_from_atom"], targets["fg_task"])
            if preds["av_task"][1] is not None:
                av_bond_loss = av_task_loss(preds['av_task'][1], targets["av_task"])
                fg_atom_from_bond_loss = fg_task_loss(preds["fg_task"]["atom_from_bond"], targets["fg_task"])
            if preds["bv_task"][0] is not None:
                bv_atom_loss = av_task_loss(preds['bv_task'][0], targets["bv_task"])
                fg_bond_from_atom_loss = fg_task_loss(preds["fg_task"]["bond_from_atom"], targets["fg_task"])
            if preds["bv_task"][1] is not None:
                bv_bond_loss = av_task_loss(preds['bv_task'][1], targets["bv_task"])
                fg_bond_from_bond_loss = fg_task_loss(preds["fg_task"]["bond_from_bond"], targets["fg_task"])

            # Disagreement (distance) losses between the two views, only when
            # both views produced predictions.
            if preds["av_task"][0] is not None and preds["av_task"][1] is not None:
                av_dist_loss = av_task_dist_loss(preds['av_task'][0], preds['av_task'][1])
                fg_atom_dist_loss = fg_task_dist_loss(sigmoid(preds["fg_task"]["atom_from_atom"]),
                                                      sigmoid(preds["fg_task"]["atom_from_bond"]))
            if preds["bv_task"][0] is not None and preds["bv_task"][1] is not None:
                bv_dist_loss = av_task_dist_loss(preds['bv_task'][0], preds['bv_task'][1])
                fg_bond_dist_loss = fg_task_dist_loss(sigmoid(preds["fg_task"]["bond_from_atom"]),
                                                      sigmoid(preds["fg_task"]["bond_from_bond"]))

            av_loss = av_atom_loss + av_bond_loss
            bv_loss = bv_atom_loss + bv_bond_loss
            fg_atom_loss = fg_atom_from_atom_loss + fg_atom_from_bond_loss
            fg_bond_loss = fg_bond_from_atom_loss + fg_bond_from_bond_loss
            fg_loss = fg_atom_loss + fg_bond_loss
            fg_dist_loss = fg_atom_dist_loss + fg_bond_dist_loss

            # NOTE(review): dist_coff scales the av/bv distance losses but NOT
            # fg_dist_loss — this asymmetry is in the original code; confirm
            # it is intentional before changing.
            overall_loss = av_loss + bv_loss + fg_loss + dist_coff * av_dist_loss + \
                           dist_coff * bv_dist_loss + fg_dist_loss
            return overall_loss, av_loss, bv_loss, fg_loss, av_dist_loss, bv_dist_loss, fg_dist_loss

        return loss_func

    def forward(self, graph_batch: List):
        """
        The forward function.
        :param graph_batch: the batched molecular graph components
            (f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a).
        :return: dict with "av_task"/"bv_task" prediction pairs
            (atom-view, bond-view) and the "fg_task" prediction dict.
        """
        _, _, _, _, _, a_scope, b_scope, _ = graph_batch
        # a_scope arrives as a tensor; the readout used downstream iterates a
        # Python list of (start, size) pairs.
        a_scope = a_scope.data.cpu().numpy().tolist()
        embeddings = self.grover(graph_batch)
        # Each head returns None when its embedding branch is disabled.
        av_task_pred_atom = self.av_task_atom(
            embeddings["atom_from_atom"])  # if None: this branch did not go through forward
        av_task_pred_bond = self.av_task_bond(embeddings["atom_from_bond"])

        bv_task_pred_atom = self.bv_task_atom(embeddings["bond_from_atom"])
        bv_task_pred_bond = self.bv_task_bond(embeddings["bond_from_bond"])

        fg_task_pred_all = self.fg_task_all(embeddings, a_scope, b_scope)

        return {"av_task": (av_task_pred_atom, av_task_pred_bond),
                "bv_task": (bv_task_pred_atom, bv_task_pred_bond),
                "fg_task": fg_task_pred_all}
class GroverFpGeneration(nn.Module):
    """
    GroverFpGeneration class.
    Loads the pre-trained model and produces fingerprints for input molecules.
    """

    def __init__(self, args):
        """
        Init function.
        :param args: the arguments (fingerprint_source, cuda, hidden_size, ...).
        """
        super(GroverFpGeneration, self).__init__()
        self.fingerprint_source = args.fingerprint_source
        self.iscuda = args.cuda
        self.grover = GROVEREmbedding(args)
        self.readout = Readout(rtype="mean", hidden_size=args.hidden_size)

    def forward(self, batch, features_batch):
        """
        The forward function.
        Takes a graph batch plus molecular feature batch and produces the
        fingerprints of those molecules.
        :param batch: the batched graph components.
        :param features_batch: extra per-molecule features (or [None, ...]).
        :return: (atom_from_bond embeddings, atom_from_atom embeddings, fingerprints).
        """
        _, _, _, _, _, a_scope, b_scope, _ = batch
        output = self.grover(batch)

        # The readout module is shared across all embedding branches.
        mol_atom_from_bond = self.readout(output["atom_from_bond"], a_scope)
        mol_atom_from_atom = self.readout(output["atom_from_atom"], a_scope)
        if self.fingerprint_source in ("bond", "both"):
            mol_bond_from_atom = self.readout(output["bond_from_atom"], b_scope)
            mol_bond_from_bond = self.readout(output["bond_from_bond"], b_scope)

        # Optional extra molecular features, moved to the embeddings' device/dtype.
        if features_batch[0] is None:
            features_batch = None
        else:
            features_batch = torch.from_numpy(np.stack(features_batch)).float()
            if self.iscuda:
                features_batch = features_batch.cuda()
            features_batch = features_batch.to(output["atom_from_atom"])
            if len(features_batch.shape) == 1:
                # Single molecule: promote to a batch of one.
                features_batch = features_batch.view([1, features_batch.shape[0]])

        # Assemble the fingerprint from the requested source(s).
        if self.fingerprint_source == "atom":
            pieces = [mol_atom_from_atom, mol_atom_from_bond]
        elif self.fingerprint_source == "bond":
            pieces = [mol_bond_from_atom, mol_bond_from_bond]
        else:
            # the "both" case.
            pieces = [mol_atom_from_atom, mol_atom_from_bond,
                      mol_bond_from_atom, mol_bond_from_bond]
        if features_batch is not None:
            pieces.append(features_batch)
        fp = torch.cat(pieces, 1)
        return output["atom_from_bond"], output["atom_from_atom"], fp
class GroverFinetuneTask(nn.Module):
    """
    The finetune task: a pre-trained GROVER encoder followed by two parallel
    feed-forward heads (atom-from-atom view and atom-from-bond view). During
    training both head outputs are returned so the loss can penalize their
    disagreement; at inference time the two are averaged.
    """

    def __init__(self, args):
        """
        :param args: the arguments; must provide hidden_size, cuda,
            self_attention (plus attn_hidden/attn_out when set), dataset_type
            and the FFN options consumed by create_ffn.
        """
        super(GroverFinetuneTask, self).__init__()

        self.hidden_size = args.hidden_size
        self.iscuda = args.cuda

        self.grover = GROVEREmbedding(args)

        if args.self_attention:
            self.readout = Readout(rtype="self_attention", hidden_size=self.hidden_size,
                                   attn_hidden=args.attn_hidden,
                                   attn_out=args.attn_out)
        else:
            self.readout = Readout(rtype="mean", hidden_size=self.hidden_size)

        # Two parallel heads: one per atom-view embedding branch.
        self.mol_atom_from_atom_ffn = self.create_ffn(args)
        self.mol_atom_from_bond_ffn = self.create_ffn(args)

        self.classification = args.dataset_type == 'classification'
        if self.classification:
            self.sigmoid = nn.Sigmoid()

    def create_ffn(self, args: Namespace):
        """
        Creates one feed-forward head for the model.
        :param args: Arguments.
        :return: an nn.Sequential mapping readout vectors to args.output_size.
        """
        # Note: args.features_dim is set according to the real loaded features data.
        if args.features_only:
            first_linear_dim = args.features_size + args.features_dim
        else:
            if args.self_attention:
                first_linear_dim = args.hidden_size * args.attn_out
                # TODO: Ad-hoc! features_dim is added unconditionally here,
                # regardless of whether input features are actually used.
                first_linear_dim += args.features_dim
            else:
                first_linear_dim = args.hidden_size + args.features_dim

        dropout = nn.Dropout(args.dropout)
        activation = get_activation_function(args.activation)
        # TODO: ffn_hidden_size
        # Create FFN layers
        if args.ffn_num_layers == 1:
            ffn = [
                dropout,
                nn.Linear(first_linear_dim, args.output_size)
            ]
        else:
            ffn = [
                dropout,
                nn.Linear(first_linear_dim, args.ffn_hidden_size)
            ]
            for _ in range(args.ffn_num_layers - 2):
                ffn.extend([
                    activation,
                    dropout,
                    nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
                ])
            ffn.extend([
                activation,
                dropout,
                nn.Linear(args.ffn_hidden_size, args.output_size),
            ])

        # Create FFN model
        return nn.Sequential(*ffn)

    @staticmethod
    def get_loss_func(args):
        """
        Builds the finetune loss closure.
        :param args: the arguments (dataset_type, dist_coff).
        :return: a loss function taking (preds, targets); in train mode preds
            is an (atom_preds, bond_preds) tuple, in eval mode a single tensor.
        :raises ValueError: when the dataset type is not supported.
        """
        def loss_func(preds, targets,
                      dt=args.dataset_type,
                      dist_coff=args.dist_coff):

            if dt == 'classification':
                pred_loss = nn.BCEWithLogitsLoss(reduction='none')
            elif dt == 'regression':
                pred_loss = nn.MSELoss(reduction='none')
            else:
                # Fixed: report the dataset type actually dispatched on (dt),
                # not args.dataset_type, which dt may have been overridden from.
                raise ValueError(f'Dataset type "{dt}" not supported.')

            # TODO: dispatching on the preds type is a hack; should use the
            # model's train/eval status instead.
            if not isinstance(preds, tuple):
                # in eval mode.
                return pred_loss(preds, targets)

            # in train mode: per-task losses plus a disagreement penalty
            # between the two heads.
            dist_loss = nn.MSELoss(reduction='none')
            # dist_loss = nn.CosineSimilarity(dim=0)

            dist = dist_loss(preds[0], preds[1])
            pred_loss1 = pred_loss(preds[0], targets)
            pred_loss2 = pred_loss(preds[1], targets)
            return pred_loss1 + pred_loss2 + dist_coff * dist

        return loss_func

    def forward(self, batch, features_batch):
        """
        :param batch: the batched graph components.
        :param features_batch: extra per-molecule features (or [None, ...]).
        :return: (atom_preds, bond_preds) in training; averaged prediction in eval.
        """
        _, _, _, _, _, a_scope, _, _ = batch

        output = self.grover(batch)
        # Shared readout over both atom-view embedding branches.
        mol_atom_from_bond_output = self.readout(output["atom_from_bond"], a_scope)
        mol_atom_from_atom_output = self.readout(output["atom_from_atom"], a_scope)

        if features_batch[0] is not None:
            features_batch = torch.from_numpy(np.stack(features_batch)).float()
            if self.iscuda:
                features_batch = features_batch.cuda()
            features_batch = features_batch.to(output["atom_from_atom"])
            if len(features_batch.shape) == 1:
                # Single molecule: promote to a batch of one.
                features_batch = features_batch.view([1, features_batch.shape[0]])
            mol_atom_from_atom_output = torch.cat([mol_atom_from_atom_output, features_batch], 1)
            mol_atom_from_bond_output = torch.cat([mol_atom_from_bond_output, features_batch], 1)

        # Both heads are needed in either mode; the original duplicated these
        # two calls inside the train and eval branches — hoisted here.
        atom_ffn_output = self.mol_atom_from_atom_ffn(mol_atom_from_atom_output)
        bond_ffn_output = self.mol_atom_from_bond_ffn(mol_atom_from_bond_output)

        if self.training:
            # Return both heads so the loss can penalize their disagreement.
            return atom_ffn_output, bond_ffn_output

        if self.classification:
            atom_ffn_output = self.sigmoid(atom_ffn_output)
            bond_ffn_output = self.sigmoid(bond_ffn_output)
        return (atom_ffn_output + bond_ffn_output) / 2
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/model/layers.py | .py | 39,294 | 904 | """
The basic building blocks in model.
"""
import math
from argparse import Namespace
from typing import Union
import numpy
import scipy.stats as stats
import torch
from torch import nn as nn
from torch.nn import LayerNorm, functional as F
from grover.util.nn_utils import get_activation_function, select_neighbor_and_aggregate
class SelfAttention(nn.Module):
    r"""
    Self-attention layer.
    Given $X \in \mathbb{R}^{n \times in\_feature}$, attention is computed as
    $a = Softmax(W_2 \tanh(W_1 X^T))$ with
    $W_1 \in \mathbb{R}^{hidden \times in\_feature}$ and
    $W_2 \in \mathbb{R}^{out\_feature \times hidden}$.
    The output $out = aX$ has a fixed number of rows independent of $n$.
    """

    def __init__(self, *, hidden, in_feature, out_feature):
        """
        :param hidden: hidden dimension (number of "experts").
        :param in_feature: input feature dimension.
        :param out_feature: output feature dimension.
        """
        super(SelfAttention, self).__init__()
        self.w1 = torch.nn.Parameter(torch.FloatTensor(hidden, in_feature))
        self.w2 = torch.nn.Parameter(torch.FloatTensor(out_feature, hidden))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize both weight matrices with Xavier-normal values."""
        for weight in (self.w1, self.w2):
            nn.init.xavier_normal_(weight)

    def forward(self, X):
        """
        :param X: input feature map, n x in_feature.
        :return: (embeddings of shape out_feature x in_feature, attention
            matrix of shape out_feature x n).
        """
        scores = torch.matmul(self.w2, torch.tanh(torch.matmul(self.w1, X.transpose(1, 0))))
        attn = torch.nn.functional.softmax(scores, dim=-1)
        return torch.matmul(attn, X), attn
class Readout(nn.Module):
    """The readout function: converts node/edge embeddings into graph embeddings."""

    def __init__(self,
                 rtype: str = "none",
                 hidden_size: int = 0,
                 attn_hidden: int = None,
                 attn_out: int = None,
                 ):
        """
        :param rtype: readout type, "mean" or "self_attention".
        :param hidden_size: input hidden size.
        :param attn_hidden: attention hidden size (self_attention only).
        :param attn_out: attention output size (self_attention only).
        """
        super(Readout, self).__init__()
        # Placeholder returned for molecules with zero atoms/bonds.
        self.cached_zero_vector = nn.Parameter(torch.zeros(hidden_size), requires_grad=False)
        self.rtype = "mean"
        if rtype == "self_attention":
            self.attn = SelfAttention(hidden=attn_hidden,
                                      in_feature=hidden_size,
                                      out_feature=attn_out)
            self.rtype = "self_attention"

    def forward(self, embeddings, scope):
        """
        Pool a batch of node/edge embeddings into one vector per molecule.
        :param embeddings: embedding matrix, (num_atoms or num_bonds) x hidden_size.
        :param scope: list of [start, size] pairs delimiting each molecule's rows.
        :return: (num_molecules, out_size) tensor of graph embeddings.
        """
        graph_vecs = []
        self.attns = []
        for start, size in scope:
            if size == 0:
                # Empty molecule: emit the cached zero vector.
                graph_vecs.append(self.cached_zero_vector)
                continue
            segment = embeddings.narrow(0, start, size)
            if self.rtype == "self_attention":
                segment, attn = self.attn(segment)
                segment = segment.flatten()
                # Temporarily disabled; enable to keep attentions for inspection:
                # self.attns.append(attn.cpu().detach().numpy())
            else:
                segment = segment.sum(dim=0) / size
            graph_vecs.append(segment)
        return torch.stack(graph_vecs, dim=0)  # (num_molecules, hidden_size)
class MPNEncoder(nn.Module):
    """A message passing neural network for encoding a molecule."""

    def __init__(self, args: Namespace,
                 atom_messages: bool,
                 init_message_dim: int,
                 attached_fea_fdim: int,
                 hidden_size: int,
                 bias: bool,
                 depth: int,
                 dropout: float,
                 undirected: bool,
                 dense: bool,
                 aggregate_to_atom: bool,
                 attach_fea: bool,
                 input_layer="fc",
                 dynamic_depth='none'
                 ):
        """
        Initializes the MPNEncoder.
        :param args: the arguments (provides the activation function name).
        :param atom_messages: enables atom_messages or not.
        :param init_message_dim: the initial input message dimension.
        :param attached_fea_fdim: the attached feature dimension.
        :param hidden_size: the output message dimension during message passing.
        :param bias: the bias in the message passing.
        :param depth: the message passing depth.
        :param dropout: the dropout rate.
        :param undirected: the message passing is undirected or not.
        :param dense: enables the dense connections.
        :param aggregate_to_atom: stored as `aggreate_to_atom` (sic); see forward.
        :param attach_fea: enables the feature attachment during the message passing process.
        :param input_layer: "fc" projects inputs through a linear layer; "none"
            passes the initial messages through unchanged.
        :param dynamic_depth: enables the dynamic depth. Possible choices: "none", "uniform" and "truncnorm"
        """
        super(MPNEncoder, self).__init__()
        self.init_message_dim = init_message_dim
        self.attached_fea_fdim = attached_fea_fdim
        self.hidden_size = hidden_size
        self.bias = bias
        self.depth = depth
        self.dropout = dropout
        self.input_layer = input_layer
        self.layers_per_message = 1
        self.undirected = undirected
        self.atom_messages = atom_messages
        self.dense = dense
        # NOTE: attribute name keeps the original misspelling to stay
        # compatible with existing checkpoints/callers.
        self.aggreate_to_atom = aggregate_to_atom
        self.attached_fea = attach_fea
        self.dynamic_depth = dynamic_depth

        # Dropout
        self.dropout_layer = nn.Dropout(p=self.dropout)

        # Activation
        self.act_func = get_activation_function(args.activation)

        # Input projection (only when input_layer == "fc").
        if self.input_layer == "fc":
            input_dim = self.init_message_dim
            self.W_i = nn.Linear(input_dim, self.hidden_size, bias=self.bias)

        if self.attached_fea:
            w_h_input_size = self.hidden_size + self.attached_fea_fdim
        else:
            w_h_input_size = self.hidden_size

        # Shared weight matrix across depths (default)
        self.W_h = nn.Linear(w_h_input_size, self.hidden_size, bias=self.bias)

    def forward(self,
                init_messages,
                init_attached_features,
                a2nei,
                a2attached,
                b2a=None,
                b2revb=None,
                adjs=None
                ) -> torch.FloatTensor:
        """
        The forward function.
        :param init_messages: initial messages, can be atom features or bond features.
        :param init_attached_features: initial attached_features.
        :param a2nei: the relation of item to its neighbors. For the atom message passing, a2nei = a2a. For bond
        messages a2nei = a2b
        :param a2attached: the relation of item to the attached features during message passing. For the atom message
        passing, a2attached = a2b. For the bond message passing a2attached = a2a
        :param b2a: remove the reversed bond in bond message passing
        :param b2revb: remove the reversed atom in bond message passing
        :param adjs: unused; kept for interface compatibility.
        :return: if aggreate_to_atom or self.atom_messages, return num_atoms x hidden.
        Otherwise, return num_bonds x hidden
        """
        # Input: optionally project the raw messages into hidden_size.
        if self.input_layer == 'fc':
            input = self.W_i(init_messages)  # num_bonds x hidden_size # f_bond
            message = self.act_func(input)  # num_bonds x hidden_size
        elif self.input_layer == 'none':
            input = init_messages
            message = input

        attached_fea = init_attached_features  # f_atom / f_bond

        # Dynamic depth: sample the number of passing steps per forward call.
        # Only active in training mode.
        if self.training and self.dynamic_depth != "none":
            if self.dynamic_depth == "uniform":
                # uniform sampling from depth - 3 to depth + 2 (numpy's
                # randint upper bound is exclusive).
                ndepth = numpy.random.randint(self.depth - 3, self.depth + 3)
            else:
                # truncated normal centered at depth, clipped to +/- 3 sigma.
                mu = self.depth
                sigma = 1
                lower = mu - 3 * sigma
                upper = mu + 3 * sigma
                X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
                ndepth = int(X.rvs(1))
        else:
            ndepth = self.depth

        # Message passing
        for _ in range(ndepth - 1):
            if self.undirected:
                # two directions should be the same
                message = (message + message[b2revb]) / 2

            # Aggregate messages from each item's neighborhood.
            nei_message = select_neighbor_and_aggregate(message, a2nei)
            a_message = nei_message
            if self.attached_fea:
                attached_nei_fea = select_neighbor_and_aggregate(attached_fea, a2attached)
                a_message = torch.cat((nei_message, attached_nei_fea), dim=1)

            if not self.atom_messages:
                rev_message = message[b2revb]
                if self.attached_fea:
                    atom_rev_message = attached_fea[b2a[b2revb]]
                    rev_message = torch.cat((rev_message, atom_rev_message), dim=1)
                # Except reverse bond its-self(w) ! \sum_{k\in N(u) \ w}
                message = a_message[b2a] - rev_message  # num_bonds x hidden
            else:
                message = a_message

            message = self.W_h(message)

            # BUG here, by default MPNEncoder uses the dense connection in the
            # message passing step. The correct form should be `if not
            # self.dense`. Kept as-is so pre-trained weights remain valid.
            if self.dense:
                message = self.act_func(message)  # num_bonds x hidden_size
            else:
                # Residual (skip) connection from the projected input.
                message = self.act_func(input + message)
            message = self.dropout_layer(message)  # num_bonds x hidden

        output = message

        return output  # num_atoms x hidden
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward network: W_2(dropout(act(W_1(x))))."""

    def __init__(self, d_model, d_ff, activation="PReLU", dropout=0.1, d_out=None):
        """Initialization.

        :param d_model: the input dimension.
        :param d_ff: the hidden dimension.
        :param activation: the activation function.
        :param dropout: the dropout rate.
        :param d_out: the output dimension; defaults to d_model.
        """
        super(PositionwiseFeedForward, self).__init__()
        d_out = d_model if d_out is None else d_out
        # Both linear layers keep their (default, enabled) bias terms.
        self.W_1 = nn.Linear(d_model, d_ff)
        self.W_2 = nn.Linear(d_ff, d_out)
        self.dropout = nn.Dropout(dropout)
        self.act_func = get_activation_function(activation)

    def forward(self, x):
        """
        :param x: input tensor (..., d_model).
        :return: output tensor (..., d_out).
        """
        hidden = self.act_func(self.W_1(x))
        return self.W_2(self.dropout(hidden))
class SublayerConnection(nn.Module):
    """
    A residual connection followed by a layer norm.
    Note: for code simplicity the norm is applied to the sublayer output
    (before the residual add), not to the input.
    """

    def __init__(self, size, dropout):
        """Initialization.

        :param size: the input dimension.
        :param dropout: the dropout ratio.
        """
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size, elementwise_affine=True)
        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, outputs):
        """Apply norm + dropout to `outputs`, adding the residual `inputs` when given."""
        normed = self.dropout(self.norm(outputs))
        return normed if inputs is None else inputs + normed
class Attention(nn.Module):
    """
    Scaled dot-product attention.
    """

    def forward(self, query, key, value, mask=None, dropout=None):
        """
        :param query: query tensor (..., seq_q, d_k).
        :param key: key tensor (..., seq_k, d_k).
        :param value: value tensor (..., seq_k, d_v).
        :param mask: optional mask; positions where mask == 0 are suppressed.
        :param dropout: optional dropout module applied to the attention weights.
        :return: (attended values, attention weights).
        """
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)

        if mask is not None:
            # A large negative fill drives masked positions to ~0 after softmax.
            scores = scores.masked_fill(mask == 0, -1e9)

        weights = F.softmax(scores, dim=-1)
        if dropout is not None:
            weights = dropout(weights)

        return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    """
    Multi-head attention: d_model is split evenly across h heads.
    """

    def __init__(self, h, d_model, dropout=0.1, bias=False):
        """
        :param h: number of attention heads.
        :param d_model: model dimension; must be divisible by h.
        :param dropout: dropout applied to the attention weights.
        :param bias: bias flag forwarded to the output projection.
        """
        super().__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h  # number of heads

        # Three projections: query, key and value.
        self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.output_linear = nn.Linear(d_model, d_model, bias)
        self.attention = Attention()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """
        :param query: (batch, seq, d_model) query tensor.
        :param key: (batch, seq, d_model) key tensor.
        :param value: (batch, seq, d_model) value tensor.
        :param mask: optional attention mask.
        :return: (batch, seq, d_model) attended output.
        """
        batch_size = query.size(0)

        def split_heads(linear, x):
            # (batch, seq, d_model) -> (batch, h, seq, d_k)
            return linear(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)

        query = split_heads(self.linear_layers[0], query)
        key = split_heads(self.linear_layers[1], key)
        value = split_heads(self.linear_layers[2], value)

        # Apply scaled dot-product attention on all heads in parallel.
        x, _ = self.attention(query, key, value, mask=mask, dropout=self.dropout)

        # Merge the heads back: (batch, seq, h * d_k) == (batch, seq, d_model).
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
        return self.output_linear(x)
class Head(nn.Module):
    """
    One head for multi-headed attention: produces (query, key, value)
    message-passing encodings of the molecular graph.
    """

    def __init__(self, args, hidden_size, atom_messages=False):
        """
        Initialization.
        :param args: The argument.
        :param hidden_size: the dimension of hidden layer in Head.
        :param atom_messages: the MPNEncoder type (atom- vs bond-level messages).
        """
        super(Head, self).__init__()
        self.atom_messages = atom_messages
        # Atom and bond features have both already been projected to
        # hidden_size, so the message and attached-feature dimensions coincide
        # for either message type.
        mpn_config = dict(args=args,
                          atom_messages=atom_messages,
                          init_message_dim=hidden_size,
                          attached_fea_fdim=hidden_size,
                          hidden_size=hidden_size,
                          bias=args.bias,
                          depth=args.depth,
                          dropout=args.dropout,
                          undirected=args.undirected,
                          dense=args.dense,
                          aggregate_to_atom=False,
                          attach_fea=False,
                          input_layer="none",
                          dynamic_depth="truncnorm")
        # Query, key and value are each produced by their own MPN encoder.
        self.mpn_q = MPNEncoder(**mpn_config)
        self.mpn_k = MPNEncoder(**mpn_config)
        self.mpn_v = MPNEncoder(**mpn_config)

    def forward(self, f_atoms, f_bonds, a2b, a2a, b2a, b2revb):
        """
        The forward function.
        :param f_atoms: the atom features, num_atoms * atom_dim
        :param f_bonds: the bond features, num_bonds * bond_dim
        :param a2b: mapping from atom index to incoming bond indices.
        :param a2a: mapping from atom index to its neighbors. num_atoms * max_num_bonds
        :param b2a: mapping from bond index to the index of the atom the bond is coming from.
        :param b2revb: mapping from bond index to the index of the reverse bond.
        :return: (query, key, value) encodings.
        """
        # Pick the message/attachment roles according to the message type.
        if self.atom_messages:
            init_messages, init_attached = f_atoms, f_bonds
            a2nei, a2attached = a2a, a2b
        else:
            init_messages, init_attached = f_bonds, f_atoms
            a2nei, a2attached = a2b, a2a
        encode_kwargs = dict(init_messages=init_messages,
                             init_attached_features=init_attached,
                             a2nei=a2nei,
                             a2attached=a2attached,
                             b2a=b2a,
                             b2revb=b2revb)
        return (self.mpn_q(**encode_kwargs),
                self.mpn_k(**encode_kwargs),
                self.mpn_v(**encode_kwargs))
class MTBlock(nn.Module):
    """
    The multi-headed attention block: several MPN heads combined through
    scaled dot-product attention, with an optional residual connection.
    """

    def __init__(self,
                 args,
                 num_attn_head,
                 input_dim,
                 hidden_size,
                 activation="ReLU",
                 dropout=0.0,
                 bias=True,
                 atom_messages=False,
                 cuda=True,
                 res_connection=False):
        """
        :param args: the arguments.
        :param num_attn_head: the number of attention heads.
        :param input_dim: the input dimension.
        :param hidden_size: the hidden size of the model.
        :param activation: the activation function.
        :param dropout: the dropout ratio.
        :param bias: if true, all linear layers contain bias terms.
        :param atom_messages: the MPNEncoder type.
        :param cuda: if true, the model runs on GPU.
        :param res_connection: enables the skip-connection in MTBlock.
        """
        super(MTBlock, self).__init__()
        self.atom_messages = atom_messages
        self.hidden_size = hidden_size
        self.heads = nn.ModuleList()
        self.input_dim = input_dim
        self.cuda = cuda
        self.res_connection = res_connection
        self.act_func = get_activation_function(activation)
        self.dropout_layer = nn.Dropout(p=dropout)
        # Note: elementwise_affine has to be consistent with the pre-training phase.
        self.layernorm = nn.LayerNorm(self.hidden_size, elementwise_affine=True)

        self.W_i = nn.Linear(self.input_dim, self.hidden_size, bias=bias)
        self.attn = MultiHeadedAttention(h=num_attn_head,
                                         d_model=self.hidden_size,
                                         bias=bias,
                                         dropout=dropout)
        self.W_o = nn.Linear(self.hidden_size * num_attn_head, self.hidden_size, bias=bias)
        self.sublayer = SublayerConnection(self.hidden_size, dropout)
        for _ in range(num_attn_head):
            self.heads.append(Head(args, hidden_size=hidden_size, atom_messages=atom_messages))

    def forward(self, batch, features_batch=None):
        """
        :param batch: the graph batch generated by GroverCollator.
        :param features_batch: the additional features of molecules. (deprecated)
        :return: (updated batch tuple, features_batch).
        """
        f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a = batch

        # Project the raw features into hidden_size exactly once (first block).
        features = f_atoms if self.atom_messages else f_bonds
        if features.shape[1] != self.hidden_size:
            features = self.W_i(features)
            features = self.dropout_layer(self.layernorm(self.act_func(features)))
        if self.atom_messages:
            f_atoms = features
        else:
            f_bonds = features

        # Run every head and stack their (q, k, v) outputs along a new axis.
        queries, keys, values = [], [], []
        for head in self.heads:
            q, k, v = head(f_atoms, f_bonds, a2b, a2a, b2a, b2revb)
            queries.append(q.unsqueeze(1))
            keys.append(k.unsqueeze(1))
            values.append(v.unsqueeze(1))

        x_out = self.attn(torch.cat(queries, dim=1),
                          torch.cat(keys, dim=1),
                          torch.cat(values, dim=1))  # multi-headed attention
        x_out = self.W_o(x_out.view(x_out.shape[0], -1))

        # Optional residual connection around the whole block.
        x_in = None
        if self.res_connection:
            x_in = f_atoms if self.atom_messages else f_bonds
        if self.atom_messages:
            f_atoms = self.sublayer(x_in, x_out)
        else:
            f_bonds = self.sublayer(x_in, x_out)

        batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
        return batch, features_batch
class GTransEncoder(nn.Module):
def __init__(self,
             args,
             hidden_size,
             edge_fdim,
             node_fdim,
             dropout=0.0,
             activation="ReLU",
             num_mt_block=1,
             num_attn_head=4,
             atom_emb_output: Union[bool, str] = False,  # options: True, False, None, "atom", "bond", "both"
             bias=False,
             cuda=True,
             res_connection=False):
    """
    :param args: the arguments.
    :param hidden_size: the hidden size of the model.
    :param edge_fdim: the dimension of additional feature for edge/bond.
    :param node_fdim: the dimension of additional feature for node/atom.
    :param dropout: the dropout ratio
    :param activation: the activation function
    :param num_mt_block: the number of mt block.
    :param num_attn_head: the number of attention head.
    :param atom_emb_output: enable the output aggregation after message passing.
                                          atom_messages:      True                      False
    -False: no aggregating to atom. output size:     (num_atoms, hidden_size)    (num_bonds, hidden_size)
    -True:  aggregating to atom.    output size:     (num_atoms, hidden_size)    (num_atoms, hidden_size)
    -None:                         same as False
    -"atom":                       same as True
    -"bond": aggragating to bond.   output size:     (num_bonds, hidden_size)    (num_bonds, hidden_size)
    -"both": aggregating to atom&bond. output size:  (num_atoms, hidden_size)    (num_bonds, hidden_size)
                                                     (num_bonds, hidden_size)    (num_atoms, hidden_size)
    :param bias: enable bias term in all linear layers.
    :param cuda: run with cuda.
    :param res_connection: enables the skip-connection in MTBlock.
    """
    super(GTransEncoder, self).__init__()

    # For the compatibility issue: normalize the legacy boolean values to
    # None (no aggregation) or the string form "atom".
    if atom_emb_output is False:
        atom_emb_output = None
    if atom_emb_output is True:
        atom_emb_output = 'atom'

    self.hidden_size = hidden_size
    self.dropout = dropout
    self.activation = activation
    self.cuda = cuda
    self.bias = bias
    self.res_connection = res_connection
    self.edge_blocks = nn.ModuleList()
    self.node_blocks = nn.ModuleList()

    edge_input_dim = edge_fdim
    node_input_dim = node_fdim
    edge_input_dim_i = edge_input_dim
    node_input_dim_i = node_input_dim

    # Stack num_mt_block MTBlocks for each view; only the first block sees the
    # raw feature dimension, all later blocks operate in hidden_size.
    for i in range(num_mt_block):
        if i != 0:
            edge_input_dim_i = self.hidden_size
            node_input_dim_i = self.hidden_size
        self.edge_blocks.append(MTBlock(args=args,
                                        num_attn_head=num_attn_head,
                                        input_dim=edge_input_dim_i,
                                        hidden_size=self.hidden_size,
                                        activation=activation,
                                        dropout=dropout,
                                        bias=self.bias,
                                        atom_messages=False,
                                        cuda=cuda))
        self.node_blocks.append(MTBlock(args=args,
                                        num_attn_head=num_attn_head,
                                        input_dim=node_input_dim_i,
                                        hidden_size=self.hidden_size,
                                        activation=activation,
                                        dropout=dropout,
                                        bias=self.bias,
                                        atom_messages=True,
                                        cuda=cuda))

    self.atom_emb_output = atom_emb_output

    # Point-wise feed-forward layers for the four view-to-view transforms;
    # each takes the hidden state concatenated with the raw atom/bond features
    # (long-range residual) and maps back to hidden_size.
    self.ffn_atom_from_atom = PositionwiseFeedForward(self.hidden_size + node_fdim,
                                                      self.hidden_size * 4,
                                                      activation=self.activation,
                                                      dropout=self.dropout,
                                                      d_out=self.hidden_size)

    self.ffn_atom_from_bond = PositionwiseFeedForward(self.hidden_size + node_fdim,
                                                      self.hidden_size * 4,
                                                      activation=self.activation,
                                                      dropout=self.dropout,
                                                      d_out=self.hidden_size)

    self.ffn_bond_from_atom = PositionwiseFeedForward(self.hidden_size + edge_fdim,
                                                      self.hidden_size * 4,
                                                      activation=self.activation,
                                                      dropout=self.dropout,
                                                      d_out=self.hidden_size)

    self.ffn_bond_from_bond = PositionwiseFeedForward(self.hidden_size + edge_fdim,
                                                      self.hidden_size * 4,
                                                      activation=self.activation,
                                                      dropout=self.dropout,
                                                      d_out=self.hidden_size)

    # One residual + layer-norm wrapper per transform branch.
    self.atom_from_atom_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout)
    self.atom_from_bond_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout)
    self.bond_from_atom_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout)
    self.bond_from_bond_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout)

    self.act_func_node = get_activation_function(self.activation)
    self.act_func_edge = get_activation_function(self.activation)

    self.dropout_layer = nn.Dropout(p=args.dropout)
def pointwise_feed_forward_to_atom_embedding(self, emb_output, atom_fea, index, ffn_layer):
    """
    Point-wise feed forward with a long-range residual connection, atom view.

    Aggregates neighbor embeddings onto each atom, concatenates the original
    atom features as a long-range residual, and applies the feed-forward layer.

    :param emb_output: the output embedding from the previous multi-head attentions.
    :param atom_fea: the atom/node feature embedding.
    :param index: the index map of neighborhood relations used for aggregation.
    :param ffn_layer: the position-wise feed-forward layer to apply.
    :return: a tuple (ffn output, raw aggregated embedding).
    """
    neighbor_agg = select_neighbor_and_aggregate(emb_output, index)
    ffn_input = torch.cat([atom_fea, neighbor_agg], dim=1)
    return ffn_layer(ffn_input), neighbor_agg
def pointwise_feed_forward_to_bond_embedding(self, emb_output, bond_fea, a2nei, b2revb, ffn_layer):
    """
    Point-wise feed forward with a long-range residual connection, bond view.

    :param emb_output: the output embedding from the previous multi-head attentions.
    :param bond_fea: the bond/edge feature embedding.
    :param a2nei: the atom-to-neighbor index map used for aggregation.
    :param b2revb: the index map from each bond to its reverse bond.
    :param ffn_layer: the position-wise feed-forward layer to apply.
    :return: a tuple (ffn output, aggregated embedding with reverse-bond message removed).
    """
    neighbor_agg = select_neighbor_and_aggregate(emb_output, a2nei)
    # The bond view must not see the message flowing back along its own
    # reverse bond, so strip it before the residual concatenation.
    neighbor_agg = self.remove_rev_bond_message(emb_output, neighbor_agg, b2revb)
    ffn_input = torch.cat([bond_fea, neighbor_agg], dim=1)
    return ffn_layer(ffn_input), neighbor_agg
@staticmethod
def remove_rev_bond_message(orginal_message, aggr_message, b2revb):
"""
:param orginal_message:
:param aggr_message:
:param b2revb:
:return:
"""
rev_message = orginal_message[b2revb]
return aggr_message - rev_message
def atom_bond_transform(self,
                        to_atom=True,  # False: to bond
                        atomwise_input=None,
                        bondwise_input=None,
                        original_f_atoms=None,
                        original_f_bonds=None,
                        a2a=None,
                        a2b=None,
                        b2a=None,
                        b2revb=None
                        ):
    """
    Transfer the output of atom/bond multi-head attention to the final atom/bond output.

    :param to_atom: if true, the output is atom embedding, otherwise, the output is bond embedding.
    :param atomwise_input: the input embedding of atom/node.
    :param bondwise_input: the input embedding of bond/edge.
    :param original_f_atoms: the initial atom features (used as a long-range residual).
    :param original_f_bonds: the initial bond features (used as a long-range residual).
    :param a2a: mapping from atom index to its neighbors. num_atoms * max_num_bonds
    :param a2b: mapping from atom index to incoming bond indices.
    :param b2a: mapping from bond index to the index of the atom the bond is coming from.
    :param b2revb: mapping from bond index to the index of the reverse bond.
    :return: a 2-tuple: (atom-from-atom, bond-from-atom) embeddings when to_atom,
        else (atom-from-bond, bond-from-bond) embeddings.
    """
    if to_atom:
        # atom input to atom output: aggregate over each atom's neighbors (a2a).
        atomwise_input, _ = self.pointwise_feed_forward_to_atom_embedding(atomwise_input, original_f_atoms, a2a,
                                                                          self.ffn_atom_from_atom)
        atom_in_atom_out = self.atom_from_atom_sublayer(None, atomwise_input)
        # bond to atom: aggregate each atom's incoming bond messages (a2b).
        bondwise_input, _ = self.pointwise_feed_forward_to_atom_embedding(bondwise_input, original_f_atoms, a2b,
                                                                          self.ffn_atom_from_bond)
        bond_in_atom_out = self.atom_from_bond_sublayer(None, bondwise_input)
        return atom_in_atom_out, bond_in_atom_out
    else:  # to bond embeddings
        # atom input to bond output: each bond gathers from its source atom
        # (b2a) plus that atom's neighbors (a2a[b2a]); b2a[b2revb] is the atom
        # at the other end, whose message is removed as the "reverse" part.
        atom_list_for_bond = torch.cat([b2a.unsqueeze(dim=1), a2a[b2a]], dim=1)
        atomwise_input, _ = self.pointwise_feed_forward_to_bond_embedding(atomwise_input, original_f_bonds,
                                                                          atom_list_for_bond,
                                                                          b2a[b2revb], self.ffn_bond_from_atom)
        atom_in_bond_out = self.bond_from_atom_sublayer(None, atomwise_input)
        # bond input to bond output: gather the incoming bonds of each bond's
        # source atom (a2b[b2a]); b2revb strips each bond's own reverse message.
        bond_list_for_bond = a2b[b2a]
        bondwise_input, _ = self.pointwise_feed_forward_to_bond_embedding(bondwise_input, original_f_bonds,
                                                                          bond_list_for_bond,
                                                                          b2revb, self.ffn_bond_from_bond)
        bond_in_bond_out = self.bond_from_bond_sublayer(None, bondwise_input)
        return atom_in_bond_out, bond_in_bond_out
def forward(self, batch, features_batch=None):
    """
    Run the stacked node/edge multi-head attention blocks over a molecule batch.

    :param batch: an 8-tuple (f_atoms, f_bonds, a2b, b2a, b2revb, a_scope,
        b_scope, a2a) describing the batched molecular graph.
    :param features_batch: optional extra features threaded through the
        attention blocks (may be None).
    :return: depending on self.atom_emb_output:
        None   -> (atom_output, bond_output) straight from the attention blocks;
        'atom' -> atom-view embeddings from atom_bond_transform;
        'bond' -> bond-view embeddings from atom_bond_transform;
        'both' -> ((atom_from_atom, bond_from_atom), (atom_from_bond, bond_from_bond)).
    """
    f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a = batch
    if self.cuda or next(self.parameters()).is_cuda:
        f_atoms, f_bonds, a2b, b2a, b2revb = \
            f_atoms.cuda(), f_bonds.cuda(), a2b.cuda(), b2a.cuda(), b2revb.cuda()
        a2a = a2a.cuda()
    node_batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
    edge_batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a
    # Keep the untransformed features around for the long-range residuals.
    original_f_atoms, original_f_bonds = f_atoms, f_bonds
    # Note: features_batch is only threaded through the attention blocks.
    for nb in self.node_blocks:  # atom messages. Multi-headed attention
        node_batch, features_batch = nb(node_batch, features_batch)
    for eb in self.edge_blocks:  # bond messages. Multi-headed attention
        edge_batch, features_batch = eb(edge_batch, features_batch)
    atom_output = node_batch[0]  # atom hidden states
    bond_output = edge_batch[1]  # bond hidden states
    if self.atom_emb_output is None:
        # output the embedding from multi-head attention directly.
        return atom_output, bond_output
    # All three remaining modes call atom_bond_transform with the same data;
    # only the to_atom flag differs.
    transform_kwargs = dict(atomwise_input=atom_output,
                            bondwise_input=bond_output,
                            original_f_atoms=original_f_atoms,
                            original_f_bonds=original_f_bonds,
                            a2a=a2a, a2b=a2b, b2a=b2a, b2revb=b2revb)
    if self.atom_emb_output == 'atom':
        return self.atom_bond_transform(to_atom=True, **transform_kwargs)
    if self.atom_emb_output == 'bond':
        return self.atom_bond_transform(to_atom=False, **transform_kwargs)
    # 'both': compute both views. (Fix: removed a leftover debug print of the
    # embedding shapes that spammed stdout on every forward pass.)
    atom_embeddings = self.atom_bond_transform(to_atom=True, **transform_kwargs)
    bond_embeddings = self.atom_bond_transform(to_atom=False, **transform_kwargs)
    # Notice: need to be consistent with output format of DualMPNN encoder
    return ((atom_embeddings[0], bond_embeddings[0]),
            (atom_embeddings[1], bond_embeddings[1]))
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/util/metrics.py | .py | 4,207 | 123 | """
The evaluation metrics.
"""
import math
from typing import List, Callable, Union
from sklearn.metrics import accuracy_score, mean_squared_error, roc_auc_score, mean_absolute_error, r2_score, \
precision_recall_curve, auc, recall_score, confusion_matrix
def accuracy(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:
    """
    Compute binary-classification accuracy after thresholding probabilities.

    :param targets: A list of binary targets.
    :param preds: A list of prediction probabilities.
    :param threshold: Probabilities strictly above this become class 1; all others class 0.
    :return: The computed accuracy.
    """
    binarized = [int(prob > threshold) for prob in preds]
    return accuracy_score(targets, binarized)
def recall(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:
    """
    Compute binary-classification recall after thresholding probabilities.

    :param targets: A list of binary targets.
    :param preds: A list of prediction probabilities.
    :param threshold: Probabilities strictly above this become class 1; all others class 0.
    :return: The computed recall.
    """
    binarized = [int(prob > threshold) for prob in preds]
    return recall_score(targets, binarized)
def sensitivity(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:
    """
    Compute binary-classification sensitivity.

    Sensitivity is identical to recall for binary classification, so this
    simply delegates to :func:`recall`.

    :param targets: A list of binary targets.
    :param preds: A list of prediction probabilities.
    :param threshold: Probabilities strictly above this become class 1; all others class 0.
    :return: The computed sensitivity.
    """
    return recall(targets, preds, threshold)
def specificity(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:
    """
    Compute binary-classification specificity (true-negative rate).

    :param targets: A list of binary targets.
    :param preds: A list of prediction probabilities.
    :param threshold: Probabilities strictly above this become class 1; all others class 0.
    :return: The computed specificity, tn / (tn + fp).
    """
    binarized = [int(prob > threshold) for prob in preds]
    tn, fp, _, _ = confusion_matrix(targets, binarized).ravel()
    return tn / float(tn + fp)
def rmse(targets: List[float], preds: List[float]) -> float:
    """
    Compute the root mean squared error.

    :param targets: A list of targets.
    :param preds: A list of predictions.
    :return: The computed rmse.
    """
    mse = mean_squared_error(targets, preds)
    return math.sqrt(mse)
def get_metric_func(metric: str) -> Callable[[Union[List[int], List[float]], List[float]], float]:
    """
    Look up the metric function for a given metric name.

    :param metric: Metric name (one of: auc, prc-auc, rmse, mae, r2, accuracy,
        recall, sensitivity, specificity).
    :return: A metric function which takes as arguments a list of targets and a list of predictions and returns a float.
    :raises ValueError: If the metric name is not recognised.
    """
    # Note: If you want to add a new metric, please also update the parser argument --metric in parsing.py.
    if metric == 'auc':
        return roc_auc_score
    elif metric == 'prc-auc':
        return prc_auc
    elif metric == 'rmse':
        return rmse
    elif metric == 'mae':
        return mean_absolute_error
    elif metric == 'r2':
        return r2_score
    elif metric == 'accuracy':
        return accuracy
    elif metric == 'recall':
        return recall
    elif metric == 'sensitivity':
        return sensitivity
    elif metric == 'specificity':
        return specificity
    raise ValueError(f'Metric "{metric}" not supported.')
def prc_auc(targets: List[int], preds: List[float]) -> float:
    """
    Computes the area under the precision-recall curve.

    :param targets: A list of binary targets.
    :param preds: A list of prediction probabilities.
    :return: The computed prc-auc.
    """
    # Fix: the locals were previously named `precision`/`recall`, shadowing
    # the module-level recall() metric function defined above.
    precision_vals, recall_vals, _ = precision_recall_curve(targets, preds)
    return auc(recall_vals, precision_vals)
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/util/multi_gpu_wrapper.py | .py | 3,059 | 111 | """
Wrapper for multi-GPU training.
"""
# use Hovorod for multi-GPU pytorch training
try:
import horovod.torch as mgw
import torch
print('using Horovod for multi-GPU training')
except ImportError:
print('[WARNING] Horovod cannot be imported; multi-GPU training is unsupported')
pass
class MultiGpuWrapper(object):
    """Wrapper for multi-GPU training.

    A thin class-method facade over Horovod (imported above as ``mgw``). Every
    method forwards to the same-named Horovod function; if the Horovod import
    failed, the resulting NameError is re-raised with a clearer message, so
    single-process runs only fail when multi-GPU features are actually used.
    """
    def __init__(self):
        """Constructor function (the class is used via classmethods only)."""
        pass
    @classmethod
    def init(cls, *args):
        """Initialization."""
        try:
            return mgw.init(*args)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def size(cls, *args):
        """Get the number of workers at all nodes."""
        try:
            return mgw.size(*args)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def rank(cls, *args):
        """Get the rank of current worker at all nodes."""
        try:
            return mgw.rank(*args)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def local_size(cls, *args):
        """Get the number of workers at the current node."""
        try:
            return mgw.local_size(*args)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def local_rank(cls, *args):
        """Get the rank of current worker at the current node."""
        try:
            return mgw.local_rank(*args)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def DistributedOptimizer(cls, *args, **kwargs):
        """Get a distributed optimizer from the base optimizer."""
        try:
            return mgw.DistributedOptimizer(*args, **kwargs)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def broadcast_parameters(cls, *args, **kwargs):
        """Get an operation to broadcast all the parameters."""
        try:
            return mgw.broadcast_parameters(*args, **kwargs)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def broadcast_optimizer_state(cls, *args, **kwargs):
        """Get an operation to broadcast all the optimizer state."""
        try:
            return mgw.broadcast_optimizer_state(*args, **kwargs)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def broadcast(cls, *args, **kwargs):
        """Get an operation to broadcast a value from the root rank.

        NOTE(review): the previous docstring was copy-pasted from
        broadcast_optimizer_state; this forwards to mgw.broadcast.
        """
        try:
            return mgw.broadcast(*args, **kwargs)
        except NameError:
            raise NameError('module <mgw> not imported')
    @classmethod
    def barrier(cls):
        """Add a barrier to synchronize different processes"""
        try:
            # An allreduce on a dummy tensor forces all workers to rendezvous.
            return mgw.allreduce(torch.tensor(0), name='barrier')
        except NameError:
            raise NameError('module <mgw> not imported')
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/util/utils.py | .py | 28,757 | 795 | """
The general utility functions.
"""
import csv
import logging
import os
import pickle
import random
from argparse import Namespace
from collections import defaultdict
from logging import Logger
from typing import List, Set, Tuple, Union, Dict
import numpy as np
import torch
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from torch import nn as nn
from tqdm import tqdm as core_tqdm
from grover.data import MoleculeDatapoint, MoleculeDataset, StandardScaler
from grover.model.models import GroverFpGeneration, GroverFinetuneTask
from grover.util.nn_utils import initialize_weights
from grover.util.scheduler import NoamLR
def get_model_args():
    """
    Get the names of all model-structure-related hyperparameters.

    These are the checkpoint argument keys that override the current arguments
    when a pretrained model is loaded (see load_checkpoint).

    :return: a list containing parameters
    """
    names = ['model_type', 'ensemble_size', 'input_layer', 'hidden_size', 'bias', 'depth']
    names += ['dropout', 'activation', 'undirected', 'ffn_hidden_size', 'ffn_num_layers']
    names += ['atom_message', 'weight_decay', 'select_by_loss', 'skip_epoch', 'backbone']
    names += ['embedding_output_type', 'self_attention', 'attn_hidden', 'attn_out', 'dense']
    names += ['bond_drop_rate', 'distinct_init', 'aug_rate', 'fine_tune_coff', 'nencoders']
    names += ['dist_coff', 'no_attach_fea', 'coord', 'num_attn_head', 'num_mt_block']
    return names
def save_features(path: str, features: List[np.ndarray]):
    """
    Saves molecule features to a compressed .npz file.

    The arrays are stored under the name "features", matching what
    load_features expects.

    :param path: Path to a .npz file where the features will be saved.
    :param features: A list of 1D numpy arrays containing the features for molecules.
    """
    np.savez_compressed(path, features=features)
def load_features(path: str) -> np.ndarray:
    """
    Loads molecule features saved on disk.

    Supported formats:
    - .npz compressed (assumes features are saved with name "features")

    All formats assume that the SMILES strings loaded elsewhere in the code are
    in the same order as the features loaded here.

    :param path: Path to a file containing features.
    :return: A 2D numpy array of size (num_molecules, features_size) containing the features.
    :raises ValueError: If the file extension is not supported.
    """
    extension = os.path.splitext(path)[1]
    if extension != '.npz':
        raise ValueError(f'Features path extension {extension} not supported.')
    return np.load(path)['features']
class tqdm(core_tqdm):
    """Project-wide tqdm variant that defaults to ASCII progress bars."""
    def __init__(self, *args, **kwargs):
        # Force ASCII rendering (log-file friendly) unless the caller overrides it.
        kwargs.setdefault("ascii", True)
        super().__init__(*args, **kwargs)
def get_task_names(path: str, use_compound_names: bool = False) -> List[str]:
    """
    Gets the task names from a data CSV file.

    The first column holds the SMILES string (and the second the compound name
    when use_compound_names is set); every column after that is a task name.

    :param path: Path to a CSV file.
    :param use_compound_names: Whether file has compound names in addition to smiles strings.
    :return: A list of task names.
    """
    first_task_column = 2 if use_compound_names else 1
    return get_header(path)[first_task_column:]
def get_header(path: str) -> List[str]:
    """
    Returns the header (first row) of a data CSV file.

    :param path: Path to a CSV file.
    :return: A list of strings containing the strings in the comma-separated header.
    """
    with open(path) as fin:
        return next(csv.reader(fin))
def get_num_tasks(path: str) -> int:
    """
    Gets the number of tasks in a data CSV file.

    All columns except the leading SMILES column count as tasks.

    :param path: Path to a CSV file.
    :return: The number of tasks.
    """
    header = get_header(path)
    return len(header) - 1
def filter_invalid_smiles(data: MoleculeDataset) -> MoleculeDataset:
    """
    Filters out datapoints whose SMILES cannot be parsed into a usable molecule.

    Drops empty SMILES, SMILES that RDKit cannot parse at all, and molecules
    with no heavy atoms.

    :param data: A MoleculeDataset.
    :return: A MoleculeDataset with only valid molecules.
    """
    datapoint_list = []
    for idx, datapoint in enumerate(data):
        if datapoint.smiles == '':
            print(f'invalid smiles {idx}: {datapoint.smiles}')
            continue
        mol = Chem.MolFromSmiles(datapoint.smiles)
        # Fix: MolFromSmiles returns None for unparseable (non-empty) SMILES;
        # the previous code crashed with AttributeError on GetNumHeavyAtoms().
        if mol is None:
            print(f'invalid smiles {idx}: {datapoint.smiles}')
            continue
        if mol.GetNumHeavyAtoms() == 0:
            print(f'invalid heavy {idx}')
            continue
        datapoint_list.append(datapoint)
    return MoleculeDataset(datapoint_list)
def get_data(path: str,
             skip_invalid_smiles: bool = True,
             args: Namespace = None,
             features_path: List[str] = None,
             max_data_size: int = None,
             use_compound_names: bool = None,
             logger: Logger = None) -> MoleculeDataset:
    """
    Gets smiles string and target values (and optionally compound names if provided) from a CSV file.

    Side effect: when `args` is given, `args.features_dim` is set to the width
    of the loaded feature matrix (or 0 when no features are loaded).

    :param path: Path to a CSV file.
    :param skip_invalid_smiles: Whether to skip and filter out invalid smiles.
    :param args: Arguments.
    :param features_path: A list of paths to files containing features. If provided, it is used
        in place of args.features_path.
    :param max_data_size: The maximum number of data points to load.
    :param use_compound_names: Whether file has compound names in addition to smiles strings.
    :param logger: Logger.
    :return: A MoleculeDataset containing smiles strings and target values along
        with other info such as additional features and compound names when desired.
    """
    debug = logger.debug if logger is not None else print
    if args is not None:
        # Prefer explicit function arguments but default to args if not provided
        features_path = features_path if features_path is not None else args.features_path
        max_data_size = max_data_size if max_data_size is not None else args.max_data_size
        use_compound_names = use_compound_names if use_compound_names is not None else args.use_compound_names
    else:
        use_compound_names = False
    # No cap given -> load everything.
    max_data_size = max_data_size or float('inf')
    # Load features
    if features_path is not None:
        features_data = []
        for feat_path in features_path:
            features_data.append(load_features(feat_path))  # each is num_data x num_features
        # Multiple feature files are concatenated column-wise, row i of each
        # file is assumed to describe molecule i.
        features_data = np.concatenate(features_data, axis=1)
        args.features_dim = len(features_data[0])
    else:
        features_data = None
        if args is not None:
            args.features_dim = 0
    # NOTE(review): skip_smiles is never populated, so this filter is currently
    # a no-op -- presumably a hook for excluding known-bad SMILES.
    skip_smiles = set()
    # Load data
    with open(path) as f:
        reader = csv.reader(f)
        next(reader)  # skip header
        lines = []
        for line in reader:
            smiles = line[0]
            if smiles in skip_smiles:
                continue
            lines.append(line)
            if len(lines) >= max_data_size:
                break
        data = MoleculeDataset([
            MoleculeDatapoint(
                line=line,
                args=args,
                features=features_data[i] if features_data is not None else None,
                use_compound_names=use_compound_names
            ) for i, line in tqdm(enumerate(lines), total=len(lines), disable=True)
        ])
    # Filter out invalid SMILES
    if skip_invalid_smiles:
        original_data_len = len(data)
        data = filter_invalid_smiles(data)
        if len(data) < original_data_len:
            debug(f'Warning: {original_data_len - len(data)} SMILES are invalid.')
    return data
def get_data_from_smiles(smiles: List[str], skip_invalid_smiles: bool = True, logger: Logger = None,
                         args: Namespace = None) -> MoleculeDataset:
    """
    Converts a list of SMILES strings to a MoleculeDataset.

    :param smiles: A list of SMILES strings.
    :param skip_invalid_smiles: Whether to skip and filter out invalid smiles.
    :param logger: Logger.
    :param args: Arguments forwarded to each MoleculeDatapoint.
    :return: A MoleculeDataset with all of the provided SMILES.
    """
    debug = logger.debug if logger is not None else print
    datapoints = [MoleculeDatapoint(line=[smile], args=args) for smile in smiles]
    data = MoleculeDataset(datapoints)
    # Filter out invalid SMILES
    if skip_invalid_smiles:
        original_data_len = len(data)
        data = filter_invalid_smiles(data)
        if len(data) < original_data_len:
            debug(f'Warning: {original_data_len - len(data)} SMILES are invalid.')
    return data
def split_data(data: MoleculeDataset,
               split_type: str = 'random',
               sizes: Tuple[float, float, float] = (0.8, 0.1, 0.1),
               seed: int = 0,
               args: Namespace = None,
               logger: Logger = None) -> Tuple[MoleculeDataset,
                                               MoleculeDataset,
                                               MoleculeDataset]:
    """
    Splits data into training, validation, and test splits.

    :param data: A MoleculeDataset.
    :param split_type: Split type: 'crossval', 'index_predetermined',
        'predetermined', 'scaffold_balanced', or 'random'.
    :param sizes: A length-3 tuple with the proportions of data in the
        train, validation, and test sets (must sum to 1).
    :param seed: The random seed to use before shuffling data.
    :param args: Namespace of arguments (supplies fold files and fold indices
        for the crossval/predetermined split types).
    :param logger: A logger.
    :return: A tuple containing the train, validation, and test splits of the data.
    :raises ValueError: If split_type is not one of the supported types.
    """
    # NOTE(review): `sum(sizes) == 1` is an exact float comparison; sizes like
    # (0.7, 0.15, 0.15) can fail it due to rounding -- consider math.isclose.
    assert len(sizes) == 3 and sum(sizes) == 1
    if args is not None:
        folds_file, val_fold_index, test_fold_index = \
            args.folds_file, args.val_fold_index, args.test_fold_index
    else:
        folds_file = val_fold_index = test_fold_index = None
    if split_type == 'crossval':
        # Each of the three splits is the union of pre-pickled index files
        # chosen by the seed-th crossval index set.
        index_set = args.crossval_index_sets[args.seed]
        data_split = []
        for split in range(3):
            split_indices = []
            for index in index_set[split]:
                with open(os.path.join(args.crossval_index_dir, f'{index}.pkl'), 'rb') as rf:
                    split_indices.extend(pickle.load(rf))
            data_split.append([data[i] for i in split_indices])
        train, val, test = tuple(data_split)
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
    elif split_type == 'index_predetermined':
        # Three explicit index lists (train/val/test), chosen by seed.
        split_indices = args.crossval_index_sets[args.seed]
        assert len(split_indices) == 3
        data_split = []
        for split in range(3):
            data_split.append([data[i] for i in split_indices[split]])
        train, val, test = tuple(data_split)
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
    elif split_type == 'predetermined':
        if not val_fold_index:
            assert sizes[2] == 0  # test set is created separately so use all of the other data for train and val
        assert folds_file is not None
        assert test_fold_index is not None
        try:
            with open(folds_file, 'rb') as f:
                all_fold_indices = pickle.load(f)
        except UnicodeDecodeError:
            with open(folds_file, 'rb') as f:
                all_fold_indices = pickle.load(f, encoding='latin1')  # in case we're loading indices from python2
        # assert len(data) == sum([len(fold_indices) for fold_indices in all_fold_indices])
        log_scaffold_stats(data, all_fold_indices, logger=logger)
        folds = [[data[i] for i in fold_indices] for fold_indices in all_fold_indices]
        test = folds[test_fold_index]
        if val_fold_index is not None:
            val = folds[val_fold_index]
        # Every fold that is not the test fold (nor the val fold, if given)
        # contributes to training data.
        train_val = []
        for i in range(len(folds)):
            if i != test_fold_index and (val_fold_index is None or i != val_fold_index):
                train_val.extend(folds[i])
        if val_fold_index is not None:
            train = train_val
        else:
            # No explicit val fold: carve train/val out of the remaining data.
            random.seed(seed)
            random.shuffle(train_val)
            train_size = int(sizes[0] * len(train_val))
            train = train_val[:train_size]
            val = train_val[train_size:]
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
    elif split_type == 'scaffold_balanced':
        return scaffold_split(data, sizes=sizes, balanced=True, seed=seed, logger=logger)
    elif split_type == 'random':
        data.shuffle(seed=seed)
        train_size = int(sizes[0] * len(data))
        train_val_size = int((sizes[0] + sizes[1]) * len(data))
        train = data[:train_size]
        val = data[train_size:train_val_size]
        test = data[train_val_size:]
        return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
    else:
        raise ValueError(f'split_type "{split_type}" not supported.')
def get_class_sizes(data: MoleculeDataset) -> List[List[float]]:
    """
    Determines the proportions of the different classes in the classification dataset.

    :param data: A classification dataset
    :return: A list of lists of class proportions. Each inner list contains
        [fraction of 0s, fraction of 1s] for one task.
    """
    targets = data.targets()
    # Collect the non-None targets per task (tasks may have missing labels).
    valid_targets = [[] for _ in range(data.num_tasks())]
    for row in targets:
        for task_idx, value in enumerate(row):
            if value is not None:
                valid_targets[task_idx].append(value)
    class_sizes = []
    for task_targets in valid_targets:
        # Make sure we're dealing with a binary classification task
        assert set(np.unique(task_targets)) <= {0, 1}
        try:
            ones = np.count_nonzero(task_targets) / len(task_targets)
        except ZeroDivisionError:
            ones = float('nan')
            print('Warning: class has no targets')
        class_sizes.append([1 - ones, ones])
    return class_sizes
def generate_scaffold(mol: Union[str, Chem.Mol], include_chirality: bool = False) -> str:
    """
    Compute the Bemis-Murcko scaffold for a SMILES string or RDKit molecule.

    :param mol: A smiles string or an RDKit molecule.
    :param include_chirality: Whether to include chirality.
    :return: The scaffold SMILES string.
    """
    # Fix: use isinstance instead of `type(mol) == str` (idiomatic and
    # subclass-safe type check).
    if isinstance(mol, str):
        mol = Chem.MolFromSmiles(mol)
    return MurckoScaffold.MurckoScaffoldSmiles(mol=mol, includeChirality=include_chirality)
def scaffold_to_smiles(mols: Union[List[str], List[Chem.Mol]],
                       use_indices: bool = False) -> Dict[str, Union[Set[str], Set[int]]]:
    """
    Computes scaffold for each smiles string and returns a mapping from scaffolds to sets of smiles.

    :param mols: A list of smiles strings or RDKit molecules.
    :param use_indices: Whether to map to the smiles' index in all_smiles rather than mapping
        to the smiles string itself. This is necessary if there are duplicate smiles.
    :return: A dictionary mapping each unique scaffold to all smiles (or smiles indices) which have that scaffold.
    """
    scaffolds = defaultdict(set)
    for idx, mol in tqdm(enumerate(mols), total=len(mols)):
        key = generate_scaffold(mol)
        scaffolds[key].add(idx if use_indices else mol)
    return scaffolds
def scaffold_split(data: MoleculeDataset,
                   sizes: Tuple[float, float, float] = (0.8, 0.1, 0.1),
                   balanced: bool = False,
                   seed: int = 0,
                   logger: logging.Logger = None) -> Tuple[MoleculeDataset,
                                                           MoleculeDataset,
                                                           MoleculeDataset]:
    """
    Split a dataset by scaffold so that no molecules sharing a scaffold are in the same split.

    :param data: A MoleculeDataset.
    :param sizes: A length-3 tuple with the proportions of data in the
        train, validation, and test sets.
    :param balanced: Try to balance sizes of scaffolds in each set, rather than just putting smallest in test set.
    :param seed: Seed for shuffling when doing balanced splitting.
    :param logger: A logger.
    :return: A tuple containing the train, validation, and test splits of the data.
    """
    # NOTE(review): exact float comparison; sizes like (0.7, 0.15, 0.15) may
    # fail due to rounding -- consider math.isclose.
    assert sum(sizes) == 1
    # Split
    train_size, val_size, test_size = sizes[0] * len(data), sizes[1] * len(data), sizes[2] * len(data)
    train, val, test = [], [], []
    train_scaffold_count, val_scaffold_count, test_scaffold_count = 0, 0, 0
    # Map from scaffold to index in the data
    scaffold_to_indices = scaffold_to_smiles(data.smiles(), use_indices=True)
    if balanced:  # Put stuff that's bigger than half the val/test size into train, rest just order randomly
        index_sets = list(scaffold_to_indices.values())
        big_index_sets = []
        small_index_sets = []
        for index_set in index_sets:
            if len(index_set) > val_size / 2 or len(index_set) > test_size / 2:
                big_index_sets.append(index_set)
            else:
                small_index_sets.append(index_set)
        random.seed(seed)
        random.shuffle(big_index_sets)
        random.shuffle(small_index_sets)
        # Big scaffold groups come first, so the greedy fill below sends them
        # to train (which has the most room).
        index_sets = big_index_sets + small_index_sets
    else:  # Sort from largest to smallest scaffold sets
        index_sets = sorted(list(scaffold_to_indices.values()),
                            key=lambda index_set: len(index_set),
                            reverse=True)
    # Greedy fill: a whole scaffold group goes to train if it fits, else to
    # val if it fits, else to test -- so scaffolds never straddle splits.
    for index_set in index_sets:
        if len(train) + len(index_set) <= train_size:
            train += index_set
            train_scaffold_count += 1
        elif len(val) + len(index_set) <= val_size:
            val += index_set
            val_scaffold_count += 1
        else:
            test += index_set
            test_scaffold_count += 1
    if logger is not None:
        logger.debug(f'Total scaffolds = {len(scaffold_to_indices):,} | '
                     f'train scaffolds = {train_scaffold_count:,} | '
                     f'val scaffolds = {val_scaffold_count:,} | '
                     f'test scaffolds = {test_scaffold_count:,}')
    log_scaffold_stats(data, index_sets, logger=logger)
    # Map from indices to data
    train = [data[i] for i in train]
    val = [data[i] for i in val]
    test = [data[i] for i in test]
    return MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test)
def log_scaffold_stats(data: "MoleculeDataset",
                       index_sets: List[Set[int]],
                       num_scaffolds: int = 10,
                       num_labels: int = 20,
                       logger: logging.Logger = None) -> List[Tuple[List[float], List[int]]]:
    """
    Logs and returns statistics about counts and average target values in molecular scaffolds.

    :param data: A MoleculeDataset.
    :param index_sets: A list of sets of indices representing splits of the data.
    :param num_scaffolds: The number of scaffolds about which to display statistics.
    :param num_labels: The number of labels about which to display statistics.
    :param logger: A Logger.
    :return: A list of tuples where each tuple contains a list of average target values
        across the first num_labels labels and a list of the number of non-zero values for
        the first num_scaffolds scaffolds, sorted in decreasing order of scaffold frequency.
    """
    # print some statistics about scaffolds
    target_avgs = []
    counts = []
    for index_set in index_sets:
        data_set = [data[i] for i in index_set]
        targets = [d.targets for d in data_set]
        # Fix: `np.float` was removed in NumPy 1.24; the builtin `float` is the
        # documented replacement (the alias was always plain float).
        targets = np.array(targets, dtype=float)
        target_avgs.append(np.nanmean(targets, axis=0))
        counts.append(np.count_nonzero(~np.isnan(targets), axis=0))
    stats = [(target_avgs[i][:num_labels], counts[i][:num_labels])
             for i in range(min(num_scaffolds, len(target_avgs)))]
    if logger is not None:
        logger.debug('Label averages per scaffold, in decreasing order of scaffold frequency,'
                     f'capped at {num_scaffolds} scaffolds and {num_labels} labels: {stats}')
    return stats
def makedirs(path: str, isfile: bool = False):
    """
    Creates a directory given a path to either a directory or file.

    If a directory is provided, creates that directory. If a file is provided
    (i.e. isfile == True), creates the parent directory for that file. A bare
    filename (empty parent) is a no-op.

    :param path: Path to a directory or file.
    :param isfile: Whether the provided path is a file rather than a directory.
    """
    target = os.path.dirname(path) if isfile else path
    if target != '':
        os.makedirs(target, exist_ok=True)
def load_args(path: str) -> Namespace:
    """
    Loads the arguments a model was trained with.

    :param path: Path where model checkpoint is saved.
    :return: The arguments Namespace that the model was trained with.
    """
    # map_location keeps all tensors on CPU regardless of the device the
    # checkpoint was saved from.
    # NOTE(review): torch.load defaults to weights_only=True since PyTorch 2.6,
    # which refuses to unpickle argparse.Namespace -- confirm the pinned torch
    # version if this starts raising.
    return torch.load(path, map_location=lambda storage, loc: storage)['args']
def get_ffn_layer_id(model: "GroverFinetuneTask"):
    """
    Get the ids of the FFN (prediction-head) parameters of a GroverFinetuneTask. (Adhoc!)

    :param model: The fine-tune model.
    :return: A list of id()s of the FFN parameter tensors.
    """
    # Fix: the previous code iterated model.state_dict(), which yields the
    # parameter *names* (strings), so id(x) was the id of a str object and
    # could never match id(p) of a Parameter in build_optimizer -- the
    # base/FFN learning-rate split silently treated everything as "base".
    # named_parameters() yields the actual Parameter objects.
    return [id(param) for name, param in model.named_parameters()
            if "grover" not in name and "ffn" in name]
def build_optimizer(model: nn.Module, args: Namespace):
    """
    Builds an Adam optimizer.

    For a GroverFinetuneTask the parameters are split into two groups: the
    GROVER backbone (learning rate scaled by args.fine_tune_coff) and the FFN
    prediction head (full learning rate). Any other model gets a single group.

    :param model: The model to optimize.
    :param args: Arguments (init_lr, weight_decay, fine_tune_coff).
    :return: An initialized Optimizer.
    """
    # Only adjust the learning rate for the GroverFinetuneTask.
    if not isinstance(model, GroverFinetuneTask):
        # if not, init adam optimizer normally.
        return torch.optim.Adam(model.parameters(), lr=args.init_lr, weight_decay=args.weight_decay)
    ffn_param_ids = set(get_ffn_layer_id(model))
    # Fix: these were lazy `filter` iterators; when fine_tune_coff == 0 the
    # freezing loop below exhausted base_params before the optimizer was
    # built, leaving the first parameter group empty. Materialize as lists.
    base_params = [p for p in model.parameters() if id(p) not in ffn_param_ids]
    ffn_params = [p for p in model.parameters() if id(p) in ffn_param_ids]
    if args.fine_tune_coff == 0:
        # Freeze the backbone entirely when its learning-rate coefficient is 0.
        for param in base_params:
            param.requires_grad = False
    optimizer = torch.optim.Adam([
        {'params': base_params, 'lr': args.init_lr * args.fine_tune_coff},
        {'params': ffn_params, 'lr': args.init_lr}
    ], lr=args.init_lr, weight_decay=args.weight_decay)
    return optimizer
def build_lr_scheduler(optimizer, args: Namespace, total_epochs: List[int] = None):
    """
    Builds a NoamLR learning rate scheduler.

    :param optimizer: The Optimizer whose learning rate will be scheduled.
    :param args: Arguments.
    :param total_epochs: The total number of epochs for which the model will be
        run; defaults to args.epochs when not given.
    :return: An initialized learning rate scheduler.
    """
    # Learning rate scheduler
    # Divide the parameter into two groups for the finetune.
    # Fix: total_epochs was accepted but silently ignored (args.epochs was
    # always used); honor the override while keeping the old default.
    return NoamLR(
        optimizer=optimizer,
        warmup_epochs=args.warmup_epochs,
        total_epochs=total_epochs if total_epochs is not None else args.epochs,
        steps_per_epoch=args.train_data_size // args.batch_size,
        init_lr=args.init_lr,
        max_lr=args.max_lr,
        final_lr=args.final_lr,
        fine_tune_coff=args.fine_tune_coff
    )
def create_logger(name: str, save_dir: str = None, quiet: bool = False) -> logging.Logger:
    """
    Creates a logger with a stream handler and two file handlers.

    The stream handler prints to the screen depending on the value of `quiet`.
    One file handler (verbose.log) saves all logs, the other (quiet.log) only
    saves important info.

    :param name: The name of the logger.
    :param save_dir: The directory in which to save the logs (no files are
        written when None).
    :param quiet: Whether the stream handler should be quiet (i.e. print only
        important info).
    :return: The configured logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    # Console output; verbosity is controlled by `quiet`.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO if quiet else logging.DEBUG)
    logger.addHandler(stream_handler)

    if save_dir is not None:
        makedirs(save_dir)
        # verbose.log captures everything, quiet.log only INFO and above.
        for filename, level in (('verbose.log', logging.DEBUG),
                                ('quiet.log', logging.INFO)):
            file_handler = logging.FileHandler(os.path.join(save_dir, filename))
            file_handler.setLevel(level)
            logger.addHandler(file_handler)

    return logger
def load_checkpoint(path: str,
                    current_args: Namespace = None,
                    cuda: bool = None,
                    logger: logging.Logger = None):
    """
    Loads a model checkpoint.
    :param path: Path where checkpoint is saved.
    :param current_args: The current arguments. Replaces the arguments loaded from the checkpoint if provided.
    :param cuda: Whether to move model to cuda.
    :param logger: A logger.
    :return: The loaded MPNN.
    """
    debug = logger.debug if logger is not None else print

    # Load model and args
    state = torch.load(path, map_location=lambda storage, loc: storage)
    args, loaded_state_dict = state['args'], state['state_dict']

    # Model-related hyperparameters always come from the checkpoint; every
    # other option comes from the caller-supplied arguments (when given).
    model_related_args = get_model_args()
    if current_args is None:
        current_args = args
    else:
        for key, value in vars(args).items():
            if key in model_related_args:
                setattr(current_args, key, value)
    # args.cuda = cuda if cuda is not None else args.cuda

    # Build model
    model = build_model(current_args)
    model_state_dict = model.state_dict()

    # Copy over only parameters that exist in the new model with matching shape.
    pretrained_state_dict = {}
    for param_name in loaded_state_dict:
        if param_name not in model_state_dict:
            debug(f'Pretrained parameter "{param_name}" cannot be found in model parameters.')
            continue
        if model_state_dict[param_name].shape != loaded_state_dict[param_name].shape:
            debug(f'Pretrained parameter "{param_name}" '
                  f'of shape {loaded_state_dict[param_name].shape} does not match corresponding '
                  f'model parameter of shape {model_state_dict[param_name].shape}.')
            continue
        debug(f'Loading pretrained parameter "{param_name}".')
        pretrained_state_dict[param_name] = loaded_state_dict[param_name]

    # Load pretrained weights
    model_state_dict.update(pretrained_state_dict)
    model.load_state_dict(model_state_dict)

    if cuda:
        debug('Moving model to cuda')
        model = model.cuda()
    return model
def get_loss_func(args: Namespace, model=None):
    """
    Gets the loss function corresponding to a given dataset type.
    :param args: Namespace containing the dataset type ("classification" or "regression").
    :param model: Optional model; when it defines its own ``get_loss_func``,
        that one takes precedence.
    :return: A PyTorch loss function.
    """
    # A model may supply a custom loss of its own.
    if hasattr(model, "get_loss_func"):
        return model.get_loss_func(args)
    # Per-element losses; reduction happens later in the training loop.
    loss_factories = {
        'classification': lambda: nn.BCEWithLogitsLoss(reduction='none'),
        'regression': lambda: nn.MSELoss(reduction='none'),
    }
    factory = loss_factories.get(args.dataset_type)
    if factory is None:
        raise ValueError(f'Dataset type "{args.dataset_type}" not supported.')
    return factory()
def load_scalars(path: str):
    """
    Loads the scalars a model was trained with.
    :param path: Path where model checkpoint is saved.
    :return: A tuple with the data scaler and the features scaler (each may be None).
    """
    state = torch.load(path, map_location=lambda storage, loc: storage)

    data_state = state['data_scaler']
    scaler = None
    if data_state is not None:
        scaler = StandardScaler(data_state['means'], data_state['stds'])

    features_state = state['features_scaler']
    features_scaler = None
    if features_state is not None:
        features_scaler = StandardScaler(features_state['means'],
                                         features_state['stds'],
                                         replace_nan_token=0)

    return scaler, features_scaler
def save_checkpoint(path: str,
                    model,
                    scaler,
                    features_scaler,
                    args: Namespace = None):
    """
    Saves a model checkpoint.
    :param path: Path where checkpoint will be saved.
    :param model: A MPNN.
    :param scaler: A StandardScaler fitted on the data (or None).
    :param features_scaler: A StandardScaler fitted on the features (or None).
    :param args: Arguments namespace.
    """
    # Serialize each scaler as a plain dict of its statistics (None stays None).
    data_scaler_state = None
    if scaler is not None:
        data_scaler_state = {'means': scaler.means, 'stds': scaler.stds}
    features_scaler_state = None
    if features_scaler is not None:
        features_scaler_state = {'means': features_scaler.means,
                                 'stds': features_scaler.stds}
    torch.save({
        'args': args,
        'state_dict': model.state_dict(),
        'data_scaler': data_scaler_state,
        'features_scaler': features_scaler_state,
    }, path)
def build_model(args: Namespace, model_idx=0):
    """
    Builds a MPNN, which is a message passing neural network + feed-forward layers.
    :param args: Arguments.
    :param model_idx: Index of this model within an ensemble; forwarded to
        weight initialization.
    :return: A MPNN containing the MPN encoder along with final linear layers with parameters initialized.
    """
    # The output head is sized by the number of tasks (1 when unspecified).
    args.output_size = getattr(args, 'num_tasks', 1)
    if args.parser_name == "fingerprint":
        model = GroverFpGeneration(args)
    else:
        # finetune and evaluation case.
        model = GroverFinetuneTask(args)
    initialize_weights(model=model, model_idx=model_idx)
    return model
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/util/scheduler.py | .py | 4,499 | 98 | """
The learning rate scheduler.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/nn_utils.py
"""
from typing import List, Union
import numpy as np
from torch.optim.lr_scheduler import _LRScheduler
class NoamLR(_LRScheduler):
    """
    Noam learning rate scheduler with piecewise linear increase and exponential decay.
    The learning rate increases linearly from init_lr to max_lr over the course of
    the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).
    Then the learning rate decreases exponentially from max_lr to final_lr over the
    course of the remaining total_steps - warmup_steps (where total_steps =
    total_epochs * steps_per_epoch). This is roughly based on the learning rate
    schedule from SelfAttention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).
    """
    def __init__(self,
                 optimizer,
                 warmup_epochs: List[Union[float, int]],
                 total_epochs: List[int],
                 steps_per_epoch: int,
                 init_lr: List[float],
                 max_lr: List[float],
                 final_lr: List[float],
                 fine_tune_coff: float = 1.0,
                 fine_tune_param_idx: int = 0):
        """
        Initializes the learning rate scheduler.
        :param optimizer: A PyTorch optimizer.
        :param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
        :param total_epochs: The total number of epochs.
        :param steps_per_epoch: The number of steps (batches) per epoch.
        :param init_lr: The initial learning rate.
        :param max_lr: The maximum learning rate (achieved after warmup_epochs).
        :param final_lr: The final learning rate (achieved after total_epochs).
        :param fine_tune_coff: The fine tune coefficient for the target param group. The true learning rate for the
        target param group would be lr*fine_tune_coff.
        :param fine_tune_param_idx: The index of target param group. Default is index 0.
        """
        # assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == \
        # len(max_lr) == len(final_lr)
        self.num_lrs = len(optimizer.param_groups)
        self.optimizer = optimizer
        # The same scalar schedule parameters are replicated for every param
        # group; the per-group difference comes only from lr_coff below.
        self.warmup_epochs = np.array([warmup_epochs] * self.num_lrs)
        self.total_epochs = np.array([total_epochs] * self.num_lrs)
        self.steps_per_epoch = steps_per_epoch
        self.init_lr = np.array([init_lr] * self.num_lrs)
        self.max_lr = np.array([max_lr] * self.num_lrs)
        self.final_lr = np.array([final_lr] * self.num_lrs)
        # lr_coff scales the learning rate of the fine-tuned param group only;
        # every other group keeps a coefficient of 1.
        self.lr_coff = np.array([1] * self.num_lrs)
        self.fine_tune_param_idx = fine_tune_param_idx
        self.lr_coff[self.fine_tune_param_idx] = fine_tune_coff
        self.current_step = 0
        self.lr = [init_lr] * self.num_lrs
        self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)
        self.total_steps = self.total_epochs * self.steps_per_epoch
        # Slope of the linear warmup and per-step ratio of the exponential decay.
        self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps
        self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))
        # NOTE(review): _LRScheduler.__init__ typically invokes self.step()
        # once, so the schedule effectively starts at current_step == 1 --
        # confirm for the installed torch version.
        super(NoamLR, self).__init__(optimizer)
    def get_lr(self) -> List[float]:
        """Gets a list of the current learning rates."""
        return list(self.lr)
    def step(self, current_step: int = None):
        """
        Updates the learning rate by taking a step.
        :param current_step: Optionally specify what step to set the learning rate to.
        If None, current_step = self.current_step + 1.
        """
        if current_step is not None:
            self.current_step = current_step
        else:
            self.current_step += 1
        for i in range(self.num_lrs):
            if self.current_step <= self.warmup_steps[i]:
                # Linear warmup: init_lr -> max_lr.
                self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]
            elif self.current_step <= self.total_steps[i]:
                # Exponential decay: max_lr -> final_lr.
                self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))
            else:  # theoretically this case should never be reached since training should stop at total_steps
                self.lr[i] = self.final_lr[i]
            # Apply the fine-tune coefficient for this param group.
            self.lr[i] *= self.lr_coff[i]
            self.optimizer.param_groups[i]['lr'] = self.lr[i]
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/util/nn_utils.py | .py | 3,492 | 97 | """
The utility function for model construction.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/nn_utils.py
"""
import torch
from torch import nn as nn
def param_count(model: nn.Module) -> int:
    """
    Determines number of trainable parameters.
    :param model: An nn.Module.
    :return: The number of trainable parameters.
    """
    total = 0
    for parameter in model.parameters():
        if parameter.requires_grad:
            total += parameter.numel()
    return total
def index_select_nd(source: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
    """
    Selects the message features from source corresponding to the atom or bond indices in index.
    :param source: A tensor of shape (num_bonds, hidden_size) containing message features.
    :param index: A tensor of shape (num_atoms/num_bonds, max_num_bonds) containing the atom or bond
    indices to select from source.
    :return: A tensor of shape (num_atoms/num_bonds, max_num_bonds, hidden_size) containing the message
    features corresponding to the atoms/bonds specified in index.
    """
    # Flatten the index, gather the matching rows from source, then restore
    # the index's leading dimensions ahead of source's feature dimensions.
    flat_index = index.view(-1)
    gathered = source.index_select(dim=0, index=flat_index)
    return gathered.view(index.size() + source.size()[1:])
def get_activation_function(activation: str) -> nn.Module:
    """
    Gets an activation function module given the name of the activation.
    :param activation: The name of the activation function.
    :return: The activation function module ("Linear" yields the identity callable).
    """
    # Factories rather than instances so every call returns a fresh module.
    factories = {
        'ReLU': nn.ReLU,
        'LeakyReLU': lambda: nn.LeakyReLU(0.1),
        'PReLU': nn.PReLU,
        'tanh': nn.Tanh,
        'SELU': nn.SELU,
        'ELU': nn.ELU,
        'Linear': lambda: (lambda x: x),
    }
    factory = factories.get(activation)
    if factory is None:
        raise ValueError(f'Activation "{activation}" not supported.')
    return factory()
def initialize_weights(model: nn.Module, distinct_init=False, model_idx=0):
    """
    Initializes the weights of a model in place.
    :param model: An nn.Module whose parameters are (re)initialized.
    :param distinct_init: If True, cycle through different init schemes based on model_idx.
    :param model_idx: Index used to pick the init scheme when distinct_init is set.
    """
    init_fns = [nn.init.kaiming_normal_, nn.init.kaiming_uniform_,
                nn.init.xavier_normal_, nn.init.xavier_uniform_]
    for param in model.parameters():
        # 1-D parameters (biases) start at zero.
        if param.dim() == 1:
            nn.init.constant_(param, 0)
            continue
        if not distinct_init:
            nn.init.xavier_normal_(param)
            continue
        # Ensemble members get distinct init schemes, cycling through the list.
        init_fn = init_fns[model_idx % 4]
        if 'kaiming' in init_fn.__name__:
            init_fn(param, nonlinearity='relu')
        else:
            init_fn(param)
def select_neighbor_and_aggregate(feature, index):
    """
    The basic operation in message passing: gather each row's neighbor
    features and sum them.
    Caution: the index_selec_ND would cause the reproducibility issue when performing the training on CUDA.
    See: https://pytorch.org/docs/stable/notes/randomness.html
    :param feature: the candidate feature for aggregate. (n_nodes, hidden)
    :param index: the selected index (neighbor indexes).
    :return: the summed neighbor features, shape (n_rows, hidden).
    """
    return index_select_nd(feature, index).sum(dim=1)
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/util/parsing.py | .py | 23,328 | 488 | """
The parsing functions for the argument input.
"""
import os
import pickle
from argparse import ArgumentParser, Namespace
from tempfile import TemporaryDirectory
import torch
from grover.data.molfeaturegenerator import get_available_features_generators
from grover.util.utils import makedirs
def add_common_args(parser: ArgumentParser):
    """
    Adds arguments shared by all subcommands to an ArgumentParser.
    :param parser: An ArgumentParser.
    """
    # NOTE(review): with default=True and action='store_true' this flag is
    # always True; passing --no_cache cannot change it. Confirm whether
    # caching was meant to be switchable.
    parser.add_argument('--no_cache', action='store_true', default=True,
                        help='Turn off caching mol2graph computation')
    # NOTE(review): on CPU-only machines device_count() is 0 so choices is
    # empty; argparse only validates explicitly passed values, so the default
    # of 0 still parses.
    parser.add_argument('--gpu', type=int, default=0,
                        choices=list(range(torch.cuda.device_count())),
                        help='Which GPU to use')
    parser.add_argument('--no_cuda', action='store_true', default=False,
                        help='Turn off cuda')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size')
def add_predict_args(parser: ArgumentParser):
    """
    Adds predict arguments to an ArgumentParser.
    :param parser: An ArgumentParser.
    """
    add_common_args(parser)
    parser.add_argument('--data_path', type=str,
                        help='Path to CSV file containing testing data for which predictions will be made')
    parser.add_argument('--output_path', type=str,
                        help='Path to CSV file where predictions will be saved')
    parser.add_argument('--checkpoint_dir', type=str,
                        help='Directory from which to load model checkpoints'
                             '(walks directory and ensembles all models that are found)')
    parser.add_argument('--features_generator', type=str, nargs='*',
                        choices=get_available_features_generators(),
                        help='Method of generating additional features')
    parser.add_argument('--features_path', type=str, nargs='*',
                        help='Path to features to use in FNN (instead of features_generator)')
    parser.add_argument('--no_features_scaling', action='store_true', default=False,
                        help='Turn off scaling of features')
def add_fingerprint_args(parser):
    """
    Adds fingerprint-generation arguments to an ArgumentParser.
    :param parser: An ArgumentParser.
    """
    add_common_args(parser)
    # parameters for fingerprints generation
    parser.add_argument('--data_path', type=str, help='Input csv file which contains SMILES')
    parser.add_argument('--output_path', type=str,
                        help='Path to npz file where predictions will be saved')
    parser.add_argument('--features_path', type=str, nargs='*',
                        help='Path to features to use in FNN (instead of features_generator)')
    parser.add_argument('--fingerprint_source', type=str,
                        choices=['atom', 'bond', 'both'], default='both',
                        help='The source to generate the fingerprints.')
    parser.add_argument('--checkpoint_path', type=str, help='model path')
    parser.add_argument('--save_lmdb_path', type=str, default=None)
def add_finetune_args(parser: ArgumentParser):
    """
    Adds training arguments to an ArgumentParser.
    :param parser: An ArgumentParser.
    """
    # General arguments
    add_common_args(parser)
    parser.add_argument('--tensorboard', action='store_true', default=False, help='Add tensorboard logger')
    # Data argumenets
    parser.add_argument('--data_path', type=str,
                        help='Path to data CSV file.')
    parser.add_argument('--use_compound_names', action='store_true', default=False,
                        help='Use when test data file contains compound names in addition to SMILES strings')
    parser.add_argument('--max_data_size', type=int,
                        help='Maximum number of data points to load')
    # Disable this option due to some bugs.
    # parser.add_argument('--test', action='store_true', default=False,
    #                     help='Whether to skip training and only test the model')
    parser.add_argument('--features_only', action='store_true', default=False,
                        help='Use only the additional features in an FFN, no graph network')
    parser.add_argument('--features_generator', type=str, nargs='*',
                        choices=get_available_features_generators(),
                        help='Method of generating additional features.')
    parser.add_argument('--features_path', type=str, nargs='*',
                        help='Path to features to use in FNN (instead of features_generator).')
    parser.add_argument('--save_dir', type=str, default=None,
                        help='Directory where model checkpoints will be saved')
    parser.add_argument('--save_smiles_splits', action='store_true', default=False,
                        help='Save smiles for each train/val/test splits for prediction convenience later')
    parser.add_argument('--checkpoint_dir', type=str, default=None,
                        help='Directory from which to load model checkpoints'
                             '(walks directory and ensembles all models that are found)')
    parser.add_argument('--checkpoint_path', type=str, default=None,
                        help='Path to model checkpoint (.pt file)')
    # Data splitting.
    parser.add_argument('--dataset_type', type=str,
                        choices=['classification', 'regression'], default='classification',
                        help='Type of dataset, e.g. classification or regression.'
                             'This determines the loss function used during training.')
    parser.add_argument('--separate_val_path', type=str,
                        help='Path to separate val set, optional')
    parser.add_argument('--separate_val_features_path', type=str, nargs='*',
                        help='Path to file with features for separate val set')
    parser.add_argument('--separate_test_path', type=str,
                        help='Path to separate test set, optional')
    parser.add_argument('--separate_test_features_path', type=str, nargs='*',
                        help='Path to file with features for separate test set')
    parser.add_argument('--split_type', type=str, default='random',
                        choices=['random', 'scaffold_balanced', 'predetermined', 'crossval', 'index_predetermined'],
                        help='Method of splitting the data into train/val/test')
    parser.add_argument('--split_sizes', type=float, nargs=3, default=[0.8, 0.1, 0.1],
                        help='Split proportions for train/validation/test sets')
    parser.add_argument('--num_folds', type=int, default=1,
                        help='Number of folds when performing cross validation')
    parser.add_argument('--folds_file', type=str, default=None,
                        help='Optional file of fold labels')
    parser.add_argument('--val_fold_index', type=int, default=None,
                        help='Which fold to use as val for leave-one-out cross val')
    parser.add_argument('--test_fold_index', type=int, default=None,
                        help='Which fold to use as test for leave-one-out cross val')
    parser.add_argument('--crossval_index_dir', type=str,
                        help='Directory in which to find cross validation index files')
    parser.add_argument('--crossval_index_file', type=str,
                        help='Indices of files to use as train/val/test'
                             'Overrides --num_folds and --seed.')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed to use when splitting data into train/val/test sets.'
                             'When `num_folds` > 1, the first fold uses this seed and all'
                             'subsequent folds add 1 to the seed.')
    # Metric
    # NOTE(review): modify_train_args later accepts only a subset of these
    # choices ('auc', 'prc-auc', 'accuracy' for classification; 'rmse', 'mae',
    # 'r2' for regression) -- the remaining options are rejected at validation.
    parser.add_argument('--metric', type=str, default=None,
                        choices=['auc',
                                 'prc-auc',
                                 'rmse',
                                 'mae',
                                 'r2',
                                 'accuracy',
                                 'recall',
                                 'sensitivity',
                                 'specificity',
                                 'matthews_corrcoef'],
                        help='Metric to use during evaluation.'
                             'Note: Does NOT affect loss function used during training'
                             '(loss is determined by the `dataset_type` argument).'
                             'Note: Defaults to "auc" for classification and "rmse" for regression.')
    parser.add_argument('--show_individual_scores', action='store_true', default=False,
                        help='Show all scores for individual targets, not just average, at the end')
    # Training arguments
    parser.add_argument('--epochs', type=int, default=30,
                        help='Number of epochs to task')
    parser.add_argument('--warmup_epochs', type=float, default=2.0,
                        help='Number of epochs during which learning rate increases linearly from'
                             'init_lr to max_lr. Afterwards, learning rate decreases exponentially'
                             'from max_lr to final_lr.')
    parser.add_argument('--init_lr', type=float, default=1e-4,
                        help='Initial learning rate')
    parser.add_argument('--max_lr', type=float, default=1e-3,
                        help='Maximum learning rate')
    parser.add_argument('--final_lr', type=float, default=1e-4,
                        help='Final learning rate')
    parser.add_argument('--no_features_scaling', action='store_true', default=False,
                        help='Turn off scaling of features')
    parser.add_argument('--early_stop_epoch', type=int, default=1000, help='If val loss did not drop in '
                                                                           'this epochs, stop running')
    # Model arguments
    parser.add_argument('--ensemble_size', type=int, default=1,
                        help='Number of models for ensemble prediction.')
    parser.add_argument('--dropout', type=float, default=0.0,
                        help='Dropout probability')
    parser.add_argument('--activation', type=str, default='ReLU',
                        choices=['ReLU', 'LeakyReLU', 'PReLU', 'tanh', 'SELU', 'ELU'],
                        help='Activation function')
    parser.add_argument('--ffn_hidden_size', type=int, default=None,
                        help='Hidden dim for higher-capacity FFN (defaults to hidden_size)')
    parser.add_argument('--ffn_num_layers', type=int, default=2,
                        help='Number of layers in FFN after MPN encoding')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight_decay')
    parser.add_argument('--select_by_loss', action='store_true', default=False,
                        help='Use validation loss as refence standard to select best model to predict')
    parser.add_argument("--embedding_output_type", default="atom", choices=["atom", "bond", "both"],
                        help="This the model parameters for pretrain model. The current finetuning task only use the "
                             "embeddings from atom branch. ")
    # Self-attentive readout.
    parser.add_argument('--self_attention', action='store_true', default=False, help='Use self attention layer. '
                                                                                     'Otherwise use mean aggregation '
                                                                                     'layer.')
    parser.add_argument('--attn_hidden', type=int, default=4, nargs='?', help='Self attention layer '
                                                                              'hidden layer size.')
    parser.add_argument('--attn_out', type=int, default=128, nargs='?', help='Self attention layer '
                                                                             'output feature size.')
    parser.add_argument('--dist_coff', type=float, default=0.1, help='The dist coefficient for output of two branches.')
    parser.add_argument('--bond_drop_rate', type=float, default=0, help='Drop out bond in molecular.')
    parser.add_argument('--distinct_init', action='store_true', default=False,
                        help='Using distinct weight init for model ensemble')
    parser.add_argument('--fine_tune_coff', type=float, default=1,
                        help='Enable distinct fine tune learning rate for fc and other layer')
    # For multi-gpu finetune.
    parser.add_argument('--enbl_multi_gpu', dest='enbl_multi_gpu',
                        action='store_true', default=False,
                        help='enable multi-GPU training')
def add_pretrain_args(parser: ArgumentParser):
    """
    Adds pretraining arguments to an ArgumentParser.
    :param parser: An ArgumentParser.
    """
    def _str_to_bool(value):
        # argparse's ``type=bool`` treats every non-empty string -- including
        # "False" -- as True, so ``--cuda False`` silently enabled the GPU.
        # Parse common boolean spellings explicitly instead.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ('true', '1', 'yes', 'y')
    parser.add_argument('--cuda', type=_str_to_bool, default=True,
                        help='Enable gpu traning or not.')
    parser.add_argument('--enable_multi_gpu', dest='enable_multi_gpu',
                        action='store_true', default=False,
                        help='enable multi-GPU training')
    # Data arguments
    parser.add_argument('--data_path', type=str,
                        help='Path to data CSV file')
    parser.add_argument('--fg_label_path', type=str, nargs='*',
                        help='Path to the label of fg task.')
    parser.add_argument('--atom_vocab_path', type=str, help="Path to the vocabulary.")
    parser.add_argument('--bond_vocab_path', type=str,
                        help="Path to the bond vocabulary.")
    # Model arguments
    parser.add_argument('--embedding_output_type', type=str, default='both', nargs='?',
                        choices=("atom", "bond", "both"),
                        help="Type of output embeddings. Options: atom, bond, both")
    #parser.add_argument('--source_branch', type=str, default='both', nargs='?', choices=("atom", "bond", "both"),
    #                    help="Type of source branch in gtrans. Options: atom, bond, both")
    parser.add_argument('--save_dir', type=str, default=None,
                        help='Directory where model checkpoints will be saved')
    parser.add_argument('--save_interval', type=int, default=9999999999, help='The model saving interval.')
    parser.add_argument('--hidden_size', type=float, default=3,
                        help='Dimensionality of hidden layers. The actual dimension is hidden_size * 100.')
    parser.add_argument('--bias', action='store_true', default=False,
                        help='Whether to add bias to linear layers')
    parser.add_argument('--depth', type=int, default=3,
                        help='Number of message passing steps')
    parser.add_argument('--dropout', type=float, default=0.0,
                        help='Dropout probability')
    parser.add_argument('--activation', type=str, default='PReLU',
                        choices=['ReLU', 'LeakyReLU', 'PReLU', 'tanh', 'SELU', 'ELU'],
                        help='Activation function')
    parser.add_argument('--undirected', action='store_true', default=False,
                        help='Undirected edges (always sum the two relevant bond vectors)')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='weight_decay')
    parser.add_argument('--num_attn_head', type=int, default=4, help='The attention head in MTBlock.')
    parser.add_argument('--num_mt_block', type=int, default=1, help="The number of MTBlock.")
    parser.add_argument('--dist_coff', type=float, default=0.1, help='The disagreement coefficient for '
                                                                     'the atom and bond branch.')
    # Training arguments
    parser.add_argument("--backbone", default="gtrans", choices=["gtrans"])
    parser.add_argument('--epochs', type=int, default=30,
                        help='Number of epochs to run')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size')
    parser.add_argument('--warmup_epochs', type=float, default=2.0,
                        help='Number of epochs during which learning rate increases linearly from'
                             'init_lr to max_lr. Afterwards, learning rate decreases exponentially'
                             'from max_lr to final_lr.')
    parser.add_argument('--init_lr', type=float, default=1e-4,
                        help='Initial learning rate')
    parser.add_argument('--max_lr', type=float, default=1e-3,
                        help='Maximum learning rate')
    parser.add_argument('--final_lr', type=float, default=1e-4,
                        help='Final learning rate')
    parser.add_argument('--bond_drop_rate', type=float, default=0, help='Drop out bond in molecular')
def update_checkpoint_args(args: Namespace):
    """
    Walks the checkpoint directory to find all checkpoints, updating args.checkpoint_paths and args.ensemble_size.
    :param args: Arguments.
    """
    # Already resolved -- nothing to do.
    if getattr(args, 'checkpoint_paths', None) is not None:
        return
    for attr in ('checkpoint_path', 'checkpoint_dir'):
        if not hasattr(args, attr):
            setattr(args, attr, None)
    if args.checkpoint_dir is not None and args.checkpoint_path is not None:
        raise ValueError('Only one of checkpoint_dir and checkpoint_path can be specified.')
    if args.checkpoint_dir is None:
        # A single explicit checkpoint (or none at all).
        args.checkpoint_paths = [args.checkpoint_path] if args.checkpoint_path is not None else None
        return
    # Gather every .pt file under the checkpoint directory.
    args.checkpoint_paths = [os.path.join(root, fname)
                             for root, _, files in os.walk(args.checkpoint_dir)
                             for fname in files
                             if fname.endswith('.pt')]
    if args.parser_name == "eval":
        assert args.ensemble_size * args.num_folds == len(args.checkpoint_paths)
    args.ensemble_size = len(args.checkpoint_paths)
    if args.ensemble_size == 0:
        raise ValueError(f'Failed to find any model checkpoints in directory "{args.checkpoint_dir}"')
def modify_predict_args(args: Namespace):
    """
    Modifies and validates predicting args in place.
    :param args: Arguments.
    """
    assert args.data_path
    assert args.output_path
    assert args.checkpoint_dir is not None or args.checkpoint_path is not None or args.checkpoint_paths is not None
    update_checkpoint_args(args)
    # Use the GPU only when available and not explicitly disabled.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    del args.no_cuda
    # Create directory for preds path
    makedirs(args.output_path, isfile=True)
    args.fingerprint = False
def modify_fingerprint_args(args):
    """
    Modifies and validates fingerprint-generation args in place.
    :param args: Arguments.
    """
    assert args.data_path
    assert args.output_path
    assert args.checkpoint_path is not None or args.checkpoint_paths is not None
    update_checkpoint_args(args)
    # Use the GPU only when available and not explicitly disabled.
    args.cuda = torch.cuda.is_available() and not args.no_cuda
    del args.no_cuda
    makedirs(args.output_path, isfile=True)
    args.fingerprint = True
def get_newest_train_args():
    """
    For backward compatibility.
    :return: A Namespace containing the newest training arguments
    """
    # Parse the finetune defaults with an empty argv, then normalize them the
    # same way real training arguments are normalized.
    dummy_parser = ArgumentParser()
    add_finetune_args(dummy_parser)
    newest_args = dummy_parser.parse_args(args=[])
    newest_args.data_path = ''
    modify_train_args(newest_args)
    return newest_args
def modify_train_args(args: Namespace):
    """
    Modifies and validates training arguments in place.
    :param args: Arguments.
    """
    global TEMP_DIR  # Prevents the temporary directory from being deleted upon function return
    assert args.data_path is not None
    assert args.dataset_type is not None
    if args.save_dir is not None:
        makedirs(args.save_dir)
    else:
        # No save_dir given: checkpoints go to a process-lifetime temp dir.
        TEMP_DIR = TemporaryDirectory()
        args.save_dir = TEMP_DIR.name
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    del args.no_cuda
    args.features_scaling = not args.no_features_scaling
    del args.no_features_scaling
    if args.metric is None:
        # Default metric depends on the dataset type.
        if args.dataset_type == 'classification':
            args.metric = 'auc'
        else:
            args.metric = 'rmse'
    # NOTE(review): metrics such as 'recall' offered by the CLI parser are
    # rejected here -- confirm whether the extra choices should be allowed.
    if not ((args.dataset_type == 'classification' and args.metric in ['auc', 'prc-auc', 'accuracy']) or
            (args.dataset_type == 'regression' and args.metric in ['rmse', 'mae', 'r2'])):
        raise ValueError(f'Metric "{args.metric}" invalid for dataset type "{args.dataset_type}".')
    # Lower is better for error metrics.
    args.minimize_score = args.metric in ['rmse', 'mae']
    update_checkpoint_args(args)
    if args.features_only:
        assert args.features_generator or args.features_path
    args.use_input_features = args.features_generator or args.features_path
    if args.features_generator is not None and 'rdkit_2d_normalized' in args.features_generator:
        # The normalized rdkit features are pre-scaled; do not scale again.
        assert not args.features_scaling
    args.num_lrs = 1
    # Split-type options must be supplied consistently.
    assert (args.split_type == 'predetermined') == (args.folds_file is not None) == (args.test_fold_index is not None)
    assert (args.split_type == 'crossval') == (args.crossval_index_dir is not None)
    assert (args.split_type in ['crossval', 'index_predetermined']) == (args.crossval_index_file is not None)
    if args.split_type in ['crossval', 'index_predetermined']:
        with open(args.crossval_index_file, 'rb') as rf:
            args.crossval_index_sets = pickle.load(rf)
        args.num_folds = len(args.crossval_index_sets)
        args.seed = 0
    if args.bond_drop_rate > 0:
        # Graphs change every epoch when bonds are dropped; caching is useless.
        args.no_cache = True
    setattr(args, 'fingerprint', False)
def modify_pretrain_args(args: Namespace):
    """
    Fills in fixed and derived pretraining options in place.
    :param args: Arguments to modify.
    """
    args.dense = False
    args.fine_tune_coff = 1
    args.no_cache = True
    # hidden_size is parsed as a float on the command line; the model needs an int.
    args.hidden_size = int(args.hidden_size)
def parse_args() -> Namespace:
    """
    Parses arguments for training and testing (includes modifying/validating arguments).
    :return: A Namespace containing the parsed, modified, and validated args.
    """
    parser = ArgumentParser()
    subparser = parser.add_subparsers(title="subcommands",
                                      dest="parser_name",
                                      help="Subcommands for fintune, prediction, and fingerprint.")
    # Table of subcommands: (name, help text, argument-adder).
    subcommands = (
        ('finetune', "Fine tune the pre-trained model.", add_finetune_args),
        ('eval', "Evaluate the results of the pre-trained model.", add_finetune_args),
        ('predict', "Predict results from fine tuned model.", add_predict_args),
        ('fingerprint', "Get the fingerprints of SMILES.", add_fingerprint_args),
        ('pretrain', "Pretrain with unlabelled SMILES.", add_pretrain_args),
    )
    for cmd_name, cmd_help, add_args in subcommands:
        add_args(subparser.add_parser(cmd_name, help=cmd_help))
    args = parser.parse_args()
    # Each subcommand gets its own post-parse normalization/validation.
    modifiers = {
        'finetune': modify_train_args,
        'eval': modify_train_args,
        'pretrain': modify_pretrain_args,
        'predict': modify_predict_args,
        'fingerprint': modify_fingerprint_args,
    }
    modifier = modifiers.get(args.parser_name)
    if modifier is not None:
        modifier(args)
    return args
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/moldataset.py | .py | 8,625 | 246 | """
The molecule dataset for finetuning.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/data/data.py
"""
import random
from argparse import Namespace
from typing import Callable, List, Union
import numpy as np
from rdkit import Chem
from torch.utils.data.dataset import Dataset
from grover.data.molfeaturegenerator import get_features_generator
from grover.data.scaler import StandardScaler
class MoleculeDatapoint:
    """A MoleculeDatapoint contains a single molecule and its associated features and targets."""

    def __init__(self,
                 line: List[str],
                 args: Namespace = None,
                 features: np.ndarray = None,
                 use_compound_names: bool = False):
        """
        Initializes a MoleculeDatapoint, which contains a single molecule.

        :param line: A list of strings generated by separating a line in a data CSV file by comma.
        :param args: Arguments.
        :param features: A numpy array containing additional features (ex. Morgan fingerprint).
        :param use_compound_names: Whether the data CSV includes the compound name on each line.
        """
        self.features_generator = None
        self.args = None
        # Only the feature-generator spec is pulled out of args; the whole
        # Namespace is kept as well for downstream consumers.
        if args is not None:
            if hasattr(args, "features_generator"):
                self.features_generator = args.features_generator
            self.args = args

        if features is not None and self.features_generator is not None:
            raise ValueError('Currently cannot provide both loaded features and a features generator.')

        self.features = features

        if use_compound_names:
            self.compound_name = line[0]  # str
            # Drop the name column so SMILES/targets keep fixed positions below.
            line = line[1:]
        else:
            self.compound_name = None

        self.smiles = line[0]  # str

        # Generate additional features if given a generator
        if self.features_generator is not None:
            self.features = []

            mol = Chem.MolFromSmiles(self.smiles)
            for fg in self.features_generator:
                features_generator = get_features_generator(fg)
                # Skip molecules RDKit cannot parse or that have no heavy atoms.
                if mol is not None and mol.GetNumHeavyAtoms() > 0:
                    if fg in ['morgan', 'morgan_count']:
                        # Morgan fingerprints take the configured bit width.
                        self.features.extend(features_generator(mol, num_bits=args.num_bits))
                    else:
                        self.features.extend(features_generator(mol))

            self.features = np.array(self.features)

        # Fix nans in features
        if self.features is not None:
            replace_token = 0
            self.features = np.where(np.isnan(self.features), replace_token, self.features)

        # Create targets; empty CSV cells become None (missing label).
        self.targets = [float(x) if x != '' else None for x in line[1:]]

    def set_features(self, features: np.ndarray):
        """
        Sets the features of the molecule.

        :param features: A 1-D numpy array of features for the molecule.
        """
        self.features = features

    def num_tasks(self) -> int:
        """
        Returns the number of prediction tasks.

        :return: The number of tasks.
        """
        return len(self.targets)

    def set_targets(self, targets: List[float]):
        """
        Sets the targets of a molecule.

        :param targets: A list of floats containing the targets.
        """
        self.targets = targets
class MoleculeDataset(Dataset):
    """A MoleculeDataset wraps a list of MoleculeDatapoints and exposes batch-level accessors."""

    def __init__(self, data: List[MoleculeDatapoint]):
        """
        Initializes a MoleculeDataset, which contains a list of MoleculeDatapoints (i.e. a list of molecules).

        :param data: A list of MoleculeDatapoints.
        """
        self.data = data
        self.args = self.data[0].args if self.data else None
        self.scaler = None

    def compound_names(self) -> List[str]:
        """
        Returns the compound names associated with the molecule (if they exist).

        :return: A list of compound names or None if the dataset does not contain compound names.
        """
        if not self.data or self.data[0].compound_name is None:
            return None
        return [d.compound_name for d in self.data]

    def smiles(self) -> List[str]:
        """
        Returns the smiles strings associated with the molecules.

        :return: A list of smiles strings.
        """
        return [d.smiles for d in self.data]

    def features(self) -> List[np.ndarray]:
        """
        Returns the features associated with each molecule (if they exist).

        :return: A list of 1D numpy arrays containing the features for each molecule or None if there are no features.
        """
        if not self.data or self.data[0].features is None:
            return None
        return [d.features for d in self.data]

    def targets(self) -> List[List[float]]:
        """
        Returns the targets associated with each molecule.

        :return: A list of lists of floats containing the targets.
        """
        return [d.targets for d in self.data]

    def num_tasks(self) -> int:
        """
        Returns the number of prediction tasks.

        :return: The number of tasks.
        """
        if self.args.dataset_type == 'multiclass':
            # Multiclass: task count is the number of classes, inferred from
            # the largest label value present in the first target column.
            return int(max(t[0] for t in self.targets())) + 1
        return self.data[0].num_tasks() if self.data else None

    def features_size(self) -> int:
        """
        Returns the size of the features array associated with each molecule.

        :return: The size of the features.
        """
        if self.data and self.data[0].features is not None:
            return len(self.data[0].features)
        return None

    def shuffle(self, seed: int = None):
        """
        Shuffles the dataset.

        :param seed: Optional random seed.
        """
        if seed is not None:
            random.seed(seed)
        random.shuffle(self.data)

    def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:
        """
        Normalizes the features of the dataset using a StandardScaler (subtract mean, divide by standard deviation).

        If a scaler is provided, uses that scaler to perform the normalization. Otherwise fits a scaler to the
        features in the dataset and then performs the normalization.

        :param scaler: A fitted StandardScaler. Used if provided. Otherwise a StandardScaler is fit on
            this dataset and is then used.
        :param replace_nan_token: What to replace nans with.
        :return: A fitted StandardScaler. If a scaler is provided, this is the same scaler. Otherwise, this is
            a scaler fit on this dataset.
        """
        if not self.data or self.data[0].features is None:
            return None

        if scaler is not None:
            self.scaler = scaler
        elif self.scaler is None:
            # Fit a fresh scaler on the stacked feature matrix.
            stacked = np.vstack([d.features for d in self.data])
            self.scaler = StandardScaler(replace_nan_token=replace_nan_token)
            self.scaler.fit(stacked)

        for d in self.data:
            d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])

        return self.scaler

    def set_targets(self, targets: List[List[float]]):
        """
        Sets the targets for each molecule in the dataset. Assumes the targets are aligned with the datapoints.

        :param targets: A list of lists of floats containing targets for each molecule. This must be the
            same length as the underlying dataset.
        """
        assert len(self.data) == len(targets)
        for datapoint, target in zip(self.data, targets):
            datapoint.set_targets(target)

    def sort(self, key: Callable):
        """
        Sorts the dataset using the provided key.

        :param key: A function on a MoleculeDatapoint to determine the sorting order.
        """
        self.data.sort(key=key)

    def __len__(self) -> int:
        """
        Returns the length of the dataset (i.e. the number of molecules).

        :return: The length of the dataset.
        """
        return len(self.data)

    def __getitem__(self, idx) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
        """
        Gets one or more MoleculeDatapoints via an index or slice.

        :param idx: An index (int) or a slice object.
        :return: A MoleculeDatapoint if an int is provided or a list of MoleculeDatapoints if a slice is provided.
        """
        return self.data[idx]
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/groverdataset.py | .py | 7,951 | 248 | """
The dataset used in training GROVER.
"""
import math
import os
import csv
from typing import Union, List
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from rdkit import Chem
import grover.util.utils as feautils
from grover.data import mol2graph
from grover.data.moldataset import MoleculeDatapoint
from grover.data.task_labels import atom_to_vocab, bond_to_vocab
def get_data(data_path, logger=None):
    """
    Load a pre-sharded GROVER dataset from ``data_path``.

    Expected layout::

        data_path/summary.txt    # "key:value" lines: n_files / n_samples / sample_per_file
        data_path/graph/<i>.csv  # SMILES shards
        data_path/feature/<i>.npz  # feature shards

    :param data_path: the data_path.
    :param logger: the logger.
    :return: a ``(BatchMolDataset, sample_per_file)`` tuple.
    """
    debug = logger.debug if logger is not None else print

    summary_path = os.path.join(data_path, "summary.txt")
    smiles_path = os.path.join(data_path, "graph")
    feature_path = os.path.join(data_path, "feature")

    # Use a context manager so the summary file is closed even on parse
    # errors (the previous version leaked the handle).
    with open(summary_path) as fin:
        n_files = int(fin.readline().strip().split(":")[-1])
        n_samples = int(fin.readline().strip().split(":")[-1])
        sample_per_file = int(fin.readline().strip().split(":")[-1])

    debug("Loading data:")
    debug("Number of files: %d" % n_files)
    debug("Number of samples: %d" % n_samples)
    debug("Samples/file: %d" % sample_per_file)

    datapoints = []
    for i in range(n_files):
        smiles_path_i = os.path.join(smiles_path, str(i) + ".csv")
        feature_path_i = os.path.join(feature_path, str(i) + ".npz")
        if i != n_files - 1:
            n_samples_i = sample_per_file
        else:
            # Size of the final shard. The previous `n_samples % sample_per_file`
            # was wrong when n_samples is an exact multiple of sample_per_file:
            # it produced 0 instead of a full shard.
            n_samples_i = n_samples - sample_per_file * (n_files - 1)
        datapoints.append(BatchDatapoint(smiles_path_i, feature_path_i, n_samples_i))
    return BatchMolDataset(datapoints), sample_per_file
def split_data(data,
               split_type='random',
               sizes=(0.8, 0.1, 0.1),
               seed=0,
               logger=None):
    """
    Split data with given train/validation/test ratio.

    :param data: a BatchMolDataset to split.
    :param split_type: currently only 'random' is supported.
    :param sizes: (train, val, test) fractions; must sum to 1.
    :param seed: random seed forwarded to the shuffle.
    :param logger: unused; kept for interface compatibility.
    :return: a (train, val, test) triple of BatchMolDataset objects.
    """
    # Use an approximate comparison: exact float equality rejects perfectly
    # valid ratios such as (0.7, 0.2, 0.1), whose binary-float sum is
    # 0.9999999999999999 rather than 1.0.
    assert len(sizes) == 3 and math.isclose(sum(sizes), 1.0)

    if split_type == "random":
        data.shuffle(seed=seed)
        data = data.data
        train_size = int(sizes[0] * len(data))
        train_val_size = int((sizes[0] + sizes[1]) * len(data))
        train = data[:train_size]
        val = data[train_size:train_val_size]
        test = data[train_val_size:]
        return BatchMolDataset(train), BatchMolDataset(val), BatchMolDataset(test)
    raise NotImplementedError("Do not support %s splits" % split_type)
class BatchDatapoint:
    """A lazily-loaded shard of molecules: one SMILES CSV plus one feature ``.npz`` file."""

    def __init__(self,
                 smiles_file,
                 feature_file,
                 n_samples,
                 ):
        self.smiles_file = smiles_file
        self.feature_file = feature_file
        # deal with the last batch graph numbers.
        self.n_samples = n_samples
        self.datapoints = None

    def load_datapoints(self):
        """Materialize the MoleculeDatapoints of this shard from disk."""
        features = self.load_feature()
        with open(self.smiles_file) as handle:
            rows = csv.reader(handle)
            next(rows)  # skip the CSV header row
            self.datapoints = [
                MoleculeDatapoint(line=row, features=features[i])
                for i, row in enumerate(rows)
            ]
        assert len(self.datapoints) == self.n_samples

    def load_feature(self):
        """Load the per-molecule feature matrix for this shard."""
        return feautils.load_features(self.feature_file)

    def shuffle(self):
        """No-op; shuffling is handled by the sampler."""
        pass

    def clean_cache(self):
        """Drop the materialized datapoints to free memory."""
        del self.datapoints
        self.datapoints = None

    def __len__(self):
        return self.n_samples

    def __getitem__(self, idx):
        assert self.datapoints is not None
        return self.datapoints[idx]

    def is_loaded(self):
        """Return True when the shard's datapoints are in memory."""
        return self.datapoints is not None
class BatchMolDataset(Dataset):
    """A dataset over multiple BatchDatapoint shards, indexed as one flat sequence."""

    def __init__(self, data: List[BatchDatapoint],
                 graph_per_file=None):
        self.data = data
        # Total number of molecules across all shards.
        self.len = sum(len(shard) for shard in self.data)
        if graph_per_file is not None:
            self.sample_per_file = graph_per_file
        else:
            # Infer the shard size from the first shard when possible.
            self.sample_per_file = len(self.data[0]) if len(self.data) != 0 else None

    def shuffle(self, seed: int = None):
        """No-op: shuffling is delegated to the distributed sampler."""
        pass

    def clean_cache(self):
        """Release every shard's cached datapoints."""
        for shard in self.data:
            shard.clean_cache()

    def __len__(self) -> int:
        return self.len

    def __getitem__(self, idx) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
        # Map a flat index to (shard, offset-within-shard).
        shard_idx, offset = divmod(idx, self.sample_per_file)
        return self.data[shard_idx][offset]

    def load_data(self, idx):
        """Ensure the shard containing flat index ``idx`` is materialized."""
        shard_idx = int(idx / self.sample_per_file)
        if not self.data[shard_idx].is_loaded():
            self.data[shard_idx].load_datapoints()

    def count_loaded_datapoints(self):
        """Return how many shards are currently materialized."""
        return sum(1 for shard in self.data if shard.is_loaded())
class GroverCollator(object):
    """Batch collator for GROVER pretraining: builds the batched molecular graph
    plus the atom-vocab / bond-vocab / functional-group self-supervision labels."""

    def __init__(self, shared_dict, atom_vocab, bond_vocab, args):
        self.args = args
        self.shared_dict = shared_dict  # MolGraph cache shared across workers
        self.atom_vocab = atom_vocab
        self.bond_vocab = bond_vocab

    def atom_random_mask(self, smiles_batch):
        """
        Perform the random mask operation on atoms.

        :param smiles_batch: list of SMILES strings in the batch.
        :return: The corresponding atom labels (0 = unmasked), with a leading 0
            for the padding atom at index 0 of the batch graph.
        """
        # There is a zero padding.
        vocab_label = [0]
        percent = 0.15  # fraction of atoms masked per molecule
        for smi in smiles_batch:
            mol = Chem.MolFromSmiles(smi)
            mlabel = [0] * mol.GetNumAtoms()
            n_mask = math.ceil(mol.GetNumAtoms() * percent)
            perm = np.random.permutation(mol.GetNumAtoms())[:n_mask]
            for p in perm:
                atom = mol.GetAtomWithIdx(int(p))
                # Unknown atom contexts fall back to the vocab's <other> index.
                mlabel[p] = self.atom_vocab.stoi.get(atom_to_vocab(mol, atom), self.atom_vocab.other_index)

            vocab_label.extend(mlabel)
        return vocab_label

    def bond_random_mask(self, smiles_batch):
        """
        Perform the random mask operation on bonds.

        :param smiles_batch: list of SMILES strings in the batch.
        :return: The corresponding bond labels, one per undirected bond in
            MolGraph's (a1 < a2) enumeration order, with a leading 0 for padding.
        """
        # There is a zero padding.
        vocab_label = [0]
        percent = 0.15  # fraction of bonds masked per molecule
        for smi in smiles_batch:
            mol = Chem.MolFromSmiles(smi)
            nm_atoms = mol.GetNumAtoms()
            nm_bonds = mol.GetNumBonds()
            mlabel = []
            n_mask = math.ceil(nm_bonds * percent)
            perm = np.random.permutation(nm_bonds)[:n_mask]
            # Enumerate atom pairs exactly as MolGraph.__init__ does so these
            # labels align with the batch graph's bond ordering.
            virtual_bond_id = 0
            for a1 in range(nm_atoms):
                for a2 in range(a1 + 1, nm_atoms):
                    bond = mol.GetBondBetweenAtoms(a1, a2)
                    if bond is None:
                        continue
                    if virtual_bond_id in perm:
                        label = self.bond_vocab.stoi.get(bond_to_vocab(mol, bond), self.bond_vocab.other_index)
                        mlabel.extend([label])
                    else:
                        mlabel.extend([0])

                    virtual_bond_id += 1
            # todo: might need to consider bond_drop_rate
            # todo: double check reverse bond
            vocab_label.extend(mlabel)
        return vocab_label

    def __call__(self, batch):
        smiles_batch = [d.smiles for d in batch]
        batchgraph = mol2graph(smiles_batch, self.shared_dict, self.args).get_components()

        # Self-supervised targets: masked atom/bond context labels plus
        # molecule-level functional-group features.
        atom_vocab_label = torch.Tensor(self.atom_random_mask(smiles_batch)).long()
        bond_vocab_label = torch.Tensor(self.bond_random_mask(smiles_batch)).long()
        fgroup_label = torch.Tensor([d.features for d in batch]).float()
        # may be some mask here
        res = {"graph_input": batchgraph,
               "targets": {"av_task": atom_vocab_label,
                           "bv_task": bond_vocab_label,
                           "fg_task": fgroup_label}
               }
        return res
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/dist_sampler.py | .py | 4,982 | 138 | """
The re-implemented distributed sampler for the distributed training of GROVER.
"""
import math
import time
import torch
from torch.utils.data.sampler import Sampler
import torch.distributed as dist
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): Whether this rank shuffles its own indices.
        sample_per_file (optional): Number of samples per shard file; when
            given, indices are distributed shard-by-shard so each rank only
            needs to load whole files.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, sample_per_file=None):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.sample_per_file = sample_per_file
        self.shuffle = shuffle

    def get_indices(self):
        """Compute the list of dataset indices owned by this rank."""
        indices = list(range(len(self.dataset)))
        if self.sample_per_file is not None:
            # Shard-aware split: each rank receives whole files only.
            indices = self.sub_indices_of_rank(indices)
        else:
            # add extra samples to make it evenly divisible
            indices += indices[:(self.total_size - len(indices))]
            assert len(indices) == self.total_size
            # contiguous subsample for this rank
            s = self.rank * self.num_samples
            e = min((self.rank + 1) * self.num_samples, len(indices))
            indices = indices[s:e]

        if self.shuffle:
            g = torch.Generator()
            # BUGFIX: manual_seed requires an integer; the previous
            # `(epoch+1)*(rank+1)*time.time()` float raised a TypeError.
            # The time component keeps per-epoch/per-rank orders distinct.
            g.manual_seed(int((self.epoch + 1) * (self.rank + 1) * time.time()))
            idx = torch.randperm(len(indices), generator=g).tolist()
            indices = [indices[i] for i in idx]

        # len(indices) may differ from num_samples in the sample_per_file path
        # (sub_indices_of_rank updates num_samples), so no assertion here.
        return indices

    def sub_indices_of_rank(self, indices):
        """Partition ``indices`` by shard file and return this rank's share."""
        # Fixed per-epoch seed so all ranks agree on the same file permutation.
        g = torch.Generator()
        g.manual_seed((self.epoch + 1) * 2 + 3)

        # Permute the (fake) file ids that cover all indices.
        f_indices = list(range(int(math.ceil(len(indices) * 1.0 / self.sample_per_file))))
        idx = torch.randperm(len(f_indices), generator=g).tolist()
        f_indices = [f_indices[i] for i in idx]

        file_per_rank = int(math.ceil(len(f_indices) * 1.0 / self.num_replicas))
        # add extra fake files to make the list evenly divisible
        f_indices += f_indices[:(file_per_rank * self.num_replicas - len(f_indices))]

        # slice out this rank's files
        rank_s = self.rank * file_per_rank
        rank_e = min((self.rank + 1) * file_per_rank, len(f_indices))
        f_indices = f_indices[rank_s:rank_e]

        res_indices = []
        for fi in f_indices:
            # Expand each file id back into its member sample indices.
            si = fi * self.sample_per_file
            ei = min((fi + 1) * self.sample_per_file, len(indices))
            res_indices += [indices[i] for i in range(si, ei)]

        # Record the true per-rank sample count for __len__.
        self.num_samples = len(res_indices)
        return res_indices

    def __iter__(self):
        return iter(self.get_indices())

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        """Record the epoch so shuffles/permutations differ between epochs."""
        self.epoch = epoch
if __name__ == "__main__":
# dataset = [1] * 9
# ds = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)
# print(ds.get_indices())
# ds = DistributedSampler(dataset, num_replicas=2, rank=1, shuffle=True)
# print(ds.get_indices())
dataset = [1] * 190001
res = []
ds = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True, sample_per_file=777)
res.extend(ds.get_indices())
print(len(ds.get_indices()))
ds = DistributedSampler(dataset, num_replicas=2, rank=1, shuffle=True, sample_per_file=777)
res.extend(ds.get_indices())
print(len(ds.get_indices()))
print(len(set(res)))
print("hello")
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/molgraph.py | .py | 15,938 | 388 | """
The data structure of Molecules.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/features/featurization.py
"""
from argparse import Namespace
from typing import List, Tuple, Union
import numpy as np
import torch
from rdkit import Chem
# Atom feature sizes
MAX_ATOMIC_NUM = 100
ATOM_FEATURES = {
'atomic_num': list(range(MAX_ATOMIC_NUM)),
'degree': [0, 1, 2, 3, 4, 5],
'formal_charge': [-1, -2, 1, 2, 0],
'chiral_tag': [0, 1, 2, 3],
'num_Hs': [0, 1, 2, 3, 4],
'hybridization': [
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2
],
}
# len(choices) + 1 to include room for uncommon values; + 2 at end for IsAromatic and mass
ATOM_FDIM = sum(len(choices) + 1 for choices in ATOM_FEATURES.values()) + 2
BOND_FDIM = 14
def get_atom_fdim() -> int:
    """
    Gets the dimensionality of atom features.

    :return: ATOM_FDIM plus the 18 extra descriptors appended by
        MolGraph.atom_features: implicit-valence one-hot (8), the four
        donor/acceptor/acidic/basic flags, and six ring-size membership flags.
    """
    return ATOM_FDIM + 18
def get_bond_fdim() -> int:
    """
    Gets the dimensionality of bond features.

    :return: BOND_FDIM (bond-only features; atom features are concatenated
        separately when building f_bonds).
    """
    return BOND_FDIM
def onek_encoding_unk(value: int, choices: List[int]) -> List[int]:
    """
    Creates a one-hot encoding.

    :param value: The value for which the encoding should be one.
    :param choices: A list of possible values.
    :return: A one-hot encoding of the value in a list of length len(choices) + 1.
        If value is not in the list of choices, then the final element in the
        encoding is 1. When ``choices`` contains negative values, ``value``
        itself is used directly as the hot index (legacy behavior kept for the
        formal-charge feature).
    """
    hot = [0] * (len(choices) + 1)
    if min(choices) < 0:
        # Legacy path: the (possibly negative) value indexes the vector directly.
        hot[value] = 1
    else:
        hot[choices.index(value) if value in choices else -1] = 1
    return hot
class MolGraph:
    """
    A MolGraph represents the graph structure and featurization of a single molecule.

    A MolGraph computes the following attributes:
    - smiles: Smiles string.
    - n_atoms: The number of atoms in the molecule.
    - n_bonds: The number of bonds in the molecule.
    - f_atoms: A mapping from an atom index to a list atom features.
    - f_bonds: A mapping from a bond index to a list of bond features.
    - a2b: A mapping from an atom index to a list of incoming bond indices.
    - b2a: A mapping from a bond index to the index of the atom the bond originates from.
    - b2revb: A mapping from a bond index to the index of the reverse bond.
    """

    def __init__(self, smiles: str, args: Namespace):
        """
        Computes the graph structure and featurization of a molecule.

        :param smiles: A smiles string.
        :param args: Arguments. Only ``args.bond_drop_rate`` is read here.
        """
        self.smiles = smiles
        self.args = args
        self.n_atoms = 0  # number of atoms
        self.n_bonds = 0  # number of (directed) bonds
        self.f_atoms = []  # mapping from atom index to atom features
        self.f_bonds = []  # mapping from bond index to concat(in_atom, bond) features
        self.a2b = []  # mapping from atom index to incoming bond indices
        self.b2a = []  # mapping from bond index to the index of the atom the bond is coming from
        self.b2revb = []  # mapping from bond index to the index of the reverse bond

        # Convert smiles to molecule
        mol = Chem.MolFromSmiles(smiles)

        # SMARTS patterns used to flag pharmacophoric atom roles in atom_features.
        self.hydrogen_donor = Chem.MolFromSmarts("[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0]),n&H1&+0]")
        self.hydrogen_acceptor = Chem.MolFromSmarts(
            "[$([O,S;H1;v2;!$(*-*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),"
            "n&H0&+0,$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]")
        self.acidic = Chem.MolFromSmarts("[$([C,S](=[O,S,P])-[O;H1,-1])]")
        self.basic = Chem.MolFromSmarts(
            "[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))]),$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);"
            "!$([C,a](=O))]),$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]")

        # Flattened tuples of atom indices matching each pharmacophore pattern.
        self.hydrogen_donor_match = sum(mol.GetSubstructMatches(self.hydrogen_donor), ())
        self.hydrogen_acceptor_match = sum(mol.GetSubstructMatches(self.hydrogen_acceptor), ())
        self.acidic_match = sum(mol.GetSubstructMatches(self.acidic), ())
        self.basic_match = sum(mol.GetSubstructMatches(self.basic), ())
        self.ring_info = mol.GetRingInfo()

        # fake the number of "atoms" if we are collapsing substructures
        self.n_atoms = mol.GetNumAtoms()

        # Get atom features
        for _, atom in enumerate(mol.GetAtoms()):
            self.f_atoms.append(self.atom_features(atom))
        # Defensive copy truncated to n_atoms (no-op in the normal case).
        self.f_atoms = [self.f_atoms[i] for i in range(self.n_atoms)]

        for _ in range(self.n_atoms):
            self.a2b.append([])

        # Get bond features; pairs are enumerated with a1 < a2, which fixes the
        # bond ordering that GroverCollator.bond_random_mask must mirror.
        for a1 in range(self.n_atoms):
            for a2 in range(a1 + 1, self.n_atoms):
                bond = mol.GetBondBetweenAtoms(a1, a2)

                if bond is None:
                    continue

                if args.bond_drop_rate > 0:
                    # Randomly drop bonds as a data-augmentation step.
                    if np.random.binomial(1, args.bond_drop_rate):
                        continue

                f_bond = self.bond_features(bond)

                # Always treat the bond as directed: store both a1->a2 and a2->a1,
                # each prefixed with the source atom's features.
                self.f_bonds.append(self.f_atoms[a1] + f_bond)
                self.f_bonds.append(self.f_atoms[a2] + f_bond)

                # Update index mappings
                b1 = self.n_bonds
                b2 = b1 + 1
                self.a2b[a2].append(b1)  # b1 = a1 --> a2
                self.b2a.append(a1)
                self.a2b[a1].append(b2)  # b2 = a2 --> a1
                self.b2a.append(a2)
                self.b2revb.append(b2)
                self.b2revb.append(b1)
                self.n_bonds += 2

    def atom_features(self, atom: Chem.rdchem.Atom) -> List[Union[bool, int, float]]:
        """
        Builds a feature vector for an atom.

        :param atom: An RDKit atom.
        :return: A list containing the atom features (ATOM_FDIM + 18 entries:
            the base one-hots plus implicit valence, pharmacophore flags, and
            ring-size membership).
        """
        # Clamp formal charge to [-2, 2]; onek_encoding_unk uses it as a direct index.
        formal_charge = atom.GetFormalCharge()
        if formal_charge > 2:
            formal_charge = 2
        elif formal_charge < -2:
            formal_charge = -2
        features = onek_encoding_unk(atom.GetAtomicNum() - 1, ATOM_FEATURES['atomic_num']) + \
                   onek_encoding_unk(atom.GetTotalDegree(), ATOM_FEATURES['degree']) + \
                   onek_encoding_unk(formal_charge, ATOM_FEATURES['formal_charge']) + \
                   onek_encoding_unk(int(atom.GetChiralTag()), ATOM_FEATURES['chiral_tag']) + \
                   onek_encoding_unk(int(atom.GetTotalNumHs()), ATOM_FEATURES['num_Hs']) + \
                   onek_encoding_unk(int(atom.GetHybridization()), ATOM_FEATURES['hybridization']) + \
                   [1 if atom.GetIsAromatic() else 0] + \
                   [atom.GetMass() * 0.01]
        atom_idx = atom.GetIdx()
        # The 18 extra descriptors counted in get_atom_fdim().
        features = features + \
                   onek_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) + \
                   [atom_idx in self.hydrogen_acceptor_match] + \
                   [atom_idx in self.hydrogen_donor_match] + \
                   [atom_idx in self.acidic_match] + \
                   [atom_idx in self.basic_match] + \
                   [self.ring_info.IsAtomInRingOfSize(atom_idx, 3),
                    self.ring_info.IsAtomInRingOfSize(atom_idx, 4),
                    self.ring_info.IsAtomInRingOfSize(atom_idx, 5),
                    self.ring_info.IsAtomInRingOfSize(atom_idx, 6),
                    self.ring_info.IsAtomInRingOfSize(atom_idx, 7),
                    self.ring_info.IsAtomInRingOfSize(atom_idx, 8)]
        return features

    def bond_features(self, bond: Chem.rdchem.Bond
                      ) -> List[Union[bool, int, float]]:
        """
        Builds a feature vector for a bond.

        :param bond: A RDKit bond (may be None, yielding a null-bond vector).
        :return: A list containing the bond features (BOND_FDIM entries).
        """
        if bond is None:
            # Null bond: leading flag set, everything else zero.
            fbond = [1] + [0] * (BOND_FDIM - 1)
        else:
            bt = bond.GetBondType()
            fbond = [
                0,  # bond is not None
                bt == Chem.rdchem.BondType.SINGLE,
                bt == Chem.rdchem.BondType.DOUBLE,
                bt == Chem.rdchem.BondType.TRIPLE,
                bt == Chem.rdchem.BondType.AROMATIC,
                (bond.GetIsConjugated() if bt is not None else 0),
                (bond.IsInRing() if bt is not None else 0)
            ]
            fbond += onek_encoding_unk(int(bond.GetStereo()), list(range(6)))
        return fbond
class BatchMolGraph:
    """
    A BatchMolGraph represents the graph structure and featurization of a batch of molecules.

    A BatchMolGraph contains the attributes of a MolGraph plus:
    - smiles_batch: A list of smiles strings.
    - n_mols: The number of molecules in the batch.
    - atom_fdim: The dimensionality of the atom features.
    - bond_fdim: The dimensionality of the bond features (technically the combined atom/bond features).
    - a_scope: A list of tuples indicating the start and end atom indices for each molecule.
    - b_scope: A list of tuples indicating the start and end bond indices for each molecule.
    - max_num_bonds: The maximum number of bonds neighboring an atom in this batch.
    - b2b: (Optional) A mapping from a bond index to incoming bond indices.
    - a2a: (Optional): A mapping from an atom index to neighboring atom indices.
    """

    def __init__(self, mol_graphs: List[MolGraph], args: Namespace):
        self.smiles_batch = [mol_graph.smiles for mol_graph in mol_graphs]
        self.n_mols = len(self.smiles_batch)

        self.atom_fdim = get_atom_fdim()
        self.bond_fdim = get_bond_fdim() + self.atom_fdim

        # Start n_atoms and n_bonds at 1 b/c zero padding
        self.n_atoms = 1  # number of atoms (start at 1 b/c need index 0 as padding)
        self.n_bonds = 1  # number of bonds (start at 1 b/c need index 0 as padding)
        self.a_scope = []  # list of tuples indicating (start_atom_index, num_atoms) for each molecule
        self.b_scope = []  # list of tuples indicating (start_bond_index, num_bonds) for each molecule

        # All start with zero padding so that indexing with zero padding returns zeros
        f_atoms = [[0] * self.atom_fdim]  # atom features
        f_bonds = [[0] * self.bond_fdim]  # combined atom/bond features
        a2b = [[]]  # mapping from atom index to incoming bond indices
        b2a = [0]  # mapping from bond index to the index of the atom the bond is coming from
        b2revb = [0]  # mapping from bond index to the index of the reverse bond
        for mol_graph in mol_graphs:
            f_atoms.extend(mol_graph.f_atoms)
            f_bonds.extend(mol_graph.f_bonds)

            # Shift each molecule's local indices by the running batch offsets.
            for a in range(mol_graph.n_atoms):
                a2b.append([b + self.n_bonds for b in mol_graph.a2b[a]])

            for b in range(mol_graph.n_bonds):
                b2a.append(self.n_atoms + mol_graph.b2a[b])
                b2revb.append(self.n_bonds + mol_graph.b2revb[b])

            self.a_scope.append((self.n_atoms, mol_graph.n_atoms))
            self.b_scope.append((self.n_bonds, mol_graph.n_bonds))
            self.n_atoms += mol_graph.n_atoms
            self.n_bonds += mol_graph.n_bonds

        # max with 1 to fix a crash in rare case of all single-heavy-atom mols
        self.max_num_bonds = max(1, max(len(in_bonds) for in_bonds in a2b))

        self.f_atoms = torch.FloatTensor(f_atoms)
        self.f_bonds = torch.FloatTensor(f_bonds)
        # Pad every atom's incoming-bond list to max_num_bonds with the 0 (padding) bond.
        self.a2b = torch.LongTensor([a2b[a] + [0] * (self.max_num_bonds - len(a2b[a])) for a in range(self.n_atoms)])
        self.b2a = torch.LongTensor(b2a)
        self.b2revb = torch.LongTensor(b2revb)
        self.b2b = None  # try to avoid computing b2b b/c O(n_atoms^3)
        self.a2a = self.b2a[self.a2b]  # only needed if using atom messages
        self.a_scope = torch.LongTensor(self.a_scope)
        self.b_scope = torch.LongTensor(self.b_scope)

    def set_new_atom_feature(self, f_atoms):
        """
        Set the new atom feature. Do not update bond feature.

        :param f_atoms: replacement atom-feature tensor.
        """
        self.f_atoms = f_atoms

    def get_components(self) -> Tuple[torch.FloatTensor, torch.FloatTensor,
                                      torch.LongTensor, torch.LongTensor, torch.LongTensor,
                                      torch.LongTensor, torch.LongTensor, torch.LongTensor]:
        """
        Returns the components of the BatchMolGraph.

        :return: An 8-tuple of PyTorch tensors: atom features, bond features,
            the a2b/b2a/b2revb graph structure, the atom and bond scopes
            (converted to LongTensors in __init__), and the a2a neighbor map.
        """
        return self.f_atoms, self.f_bonds, self.a2b, self.b2a, self.b2revb, self.a_scope, self.b_scope, self.a2a

    def get_b2b(self) -> torch.LongTensor:
        """
        Computes (if necessary) and returns a mapping from each bond index to all the incoming bond indices.

        :return: A PyTorch tensor containing the mapping from each bond index to all the incoming bond indices.
        """
        if self.b2b is None:
            b2b = self.a2b[self.b2a]  # num_bonds x max_num_bonds
            # b2b includes reverse edge for each bond so need to mask out
            revmask = (b2b != self.b2revb.unsqueeze(1).repeat(1, b2b.size(1))).long()  # num_bonds x max_num_bonds
            self.b2b = b2b * revmask

        return self.b2b

    def get_a2a(self) -> torch.LongTensor:
        """
        Computes (if necessary) and returns a mapping from each atom index to all neighboring atom indices.

        :return: A PyTorch tensor containing the mapping from each atom index to all neighboring atom indices.
        """
        if self.a2a is None:
            # b = a1 --> a2
            # a2b maps a2 to all incoming bonds b
            # b2a maps each bond b to the atom it comes from a1
            # thus b2a[a2b] maps atom a2 to neighboring atoms a1
            self.a2a = self.b2a[self.a2b]  # num_atoms x max_num_bonds

        return self.a2a
def mol2graph(smiles_batch: List[str], shared_dict,
              args: Namespace) -> BatchMolGraph:
    """
    Converts a list of SMILES strings to a BatchMolGraph containing the batch of molecular graphs.

    :param smiles_batch: A list of SMILES strings.
    :param shared_dict: A (possibly multiprocess-shared) cache mapping SMILES -> MolGraph.
    :param args: Arguments.
    :return: A BatchMolGraph containing the combined molecular graph for the molecules
    """
    graphs = []
    for smi in smiles_batch:
        if smi in shared_dict:
            # Cache hit: reuse the previously-built graph.
            graph = shared_dict[smi]
        else:
            graph = MolGraph(smi, args)
            if not args.no_cache:
                shared_dict[smi] = graph
        graphs.append(graph)
    return BatchMolGraph(graphs, args)
class MolCollator(object):
    """
    Collator for pytorch dataloader

    :param shared_dict: a shared dict of multiprocess.
    :param args: Arguments.
    """

    def __init__(self, shared_dict, args):
        self.args = args
        self.shared_dict = shared_dict

    def __call__(self, batch):
        smiles_batch = [d.smiles for d in batch]
        features_batch = [d.features for d in batch]
        target_batch = [d.targets for d in batch]
        components = mol2graph(smiles_batch, self.shared_dict, self.args).get_components()
        # 1 where a target is present, 0 where the CSV cell was empty.
        mask = torch.Tensor([[tgt is not None for tgt in row] for row in target_batch])
        # Missing targets are zero-filled; the mask excludes them from the loss.
        targets = torch.Tensor([[0 if tgt is None else tgt for tgt in row] for row in target_batch])
        return smiles_batch, components, features_batch, mask, targets
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/torchvocab.py | .py | 6,636 | 191 | """
The contextual property.
"""
import pickle
from collections import Counter
from multiprocessing import Pool
import tqdm
from rdkit import Chem
from grover.data.task_labels import atom_to_vocab
from grover.data.task_labels import bond_to_vocab
class TorchVocab(object):
    """
    Defines the vocabulary for atoms/bonds in molecular.
    """

    def __init__(self, counter, max_size=None, min_freq=1, specials=('<pad>', '<other>'), vocab_type='atom'):
        """
        :param counter: a Counter of token -> frequency.
        :param max_size: optional cap on vocabulary size (excluding specials).
        :param min_freq: tokens rarer than this are dropped.
        :param specials: tokens always placed at the front of the vocabulary.
        :param vocab_type: 'atom': atom atom_vocab; 'bond': bond atom_vocab.
        """
        if vocab_type not in ('atom', 'bond'):
            raise ValueError('Wrong input for vocab_type!')
        self.vocab_type = vocab_type

        # Keep a reference to the caller's counter as the frequency table.
        self.freqs = counter
        working = counter.copy()
        min_freq = max(min_freq, 1)

        self.itos = list(specials)
        max_size = None if max_size is None else max_size + len(self.itos)

        # sort by frequency (descending), breaking ties alphabetically
        ordered = sorted(working.items(), key=lambda kv: kv[0])
        ordered.sort(key=lambda kv: kv[1], reverse=True)

        for token, freq in ordered:
            if freq < min_freq or len(self.itos) == max_size:
                break
            self.itos.append(token)

        # stoi is simply a reverse dict for itos
        self.stoi = {tok: i for i, tok in enumerate(self.itos)}
        self.other_index = 1
        self.pad_index = 0

    def __eq__(self, other):
        return (self.freqs == other.freqs
                and self.stoi == other.stoi
                and self.itos == other.itos)

    def __len__(self):
        return len(self.itos)

    def vocab_rerank(self):
        """Rebuild stoi after itos has been modified externally."""
        self.stoi = {word: i for i, word in enumerate(self.itos)}

    def extend(self, v, sort=False):
        """Merge another vocab's tokens and frequencies into this one."""
        words = sorted(v.itos) if sort else v.itos
        for w in words:
            if w not in self.stoi:
                self.itos.append(w)
                self.stoi[w] = len(self.itos) - 1
                self.freqs[w] = 0
            self.freqs[w] += v.freqs[w]

    def mol_to_seq(self, mol, with_len=False):
        """Map a molecule (or SMILES string) to its sequence of vocab indices."""
        if isinstance(mol, str):
            mol = Chem.MolFromSmiles(mol)
        if self.vocab_type == 'atom':
            items = mol.GetAtoms()
            to_key = atom_to_vocab
        else:
            items = mol.GetBonds()
            to_key = bond_to_vocab
        # Unknown contexts fall back to the <other> index.
        seq = [self.stoi.get(to_key(mol, item), self.other_index) for item in items]
        return (seq, len(seq)) if with_len else seq

    @staticmethod
    def load_vocab(vocab_path: str) -> 'Vocab':
        """Deserialize a pickled vocabulary from ``vocab_path``."""
        with open(vocab_path, "rb") as f:
            return pickle.load(f)

    def save_vocab(self, vocab_path):
        """Pickle this vocabulary to ``vocab_path``."""
        with open(vocab_path, "wb") as f:
            pickle.dump(self, f)
class MolVocab(TorchVocab):
    """Vocabulary built from molecules, given either as an in-memory list of
    SMILES or as a SMILES file (one SMILES per line, first line a header).

    Note: the original class defined ``__init__`` twice; the SMILES-list
    variant was dead code because the second definition shadowed the first.
    Both entry points are kept here behind a type check on the first
    argument, which is backward compatible with the live (file-path) form.
    """
    def __init__(self, file_path, max_size=None, min_freq=1, num_workers=1, total_lines=None, vocab_type='atom'):
        """
        :param file_path: path of a SMILES file, or a sequence of SMILES
            strings.
        :param max_size: optional cap on the vocabulary size.
        :param min_freq: minimum frequency for a word to be kept.
        :param num_workers: processes used when counting from a file.
        :param total_lines: line count of the file; counted when None.
        :param vocab_type: 'atom': atom atom_vocab; 'bond': bond atom_vocab.
        """
        if vocab_type in ('atom', 'bond'):
            self.vocab_type = vocab_type
        else:
            raise ValueError('Wrong input for vocab_type!')
        if isinstance(file_path, str):
            counter = self._count_from_file(file_path, num_workers, total_lines, vocab_type)
        else:
            counter = self._count_from_smiles(file_path, vocab_type)
        super().__init__(counter, max_size=max_size, min_freq=min_freq, vocab_type=vocab_type)
    def _count_from_smiles(self, smiles, vocab_type):
        """Count atom/bond context words over an in-memory SMILES sequence."""
        print("Building %s vocab from smiles: %d" % (self.vocab_type, len(smiles)))
        counter = Counter()
        for smi in tqdm.tqdm(smiles):
            mol = Chem.MolFromSmiles(smi)
            if vocab_type == 'atom':
                for atom in mol.GetAtoms():
                    counter[atom_to_vocab(mol, atom)] += 1
            else:
                for bond in mol.GetBonds():
                    counter[bond_to_vocab(mol, bond)] += 1
        return counter
    def _count_from_file(self, file_path, num_workers, total_lines, vocab_type):
        """Count atom/bond context words over a SMILES file, in parallel."""
        print("Building %s vocab from file: %s" % (self.vocab_type, file_path))
        from rdkit import RDLogger
        lg = RDLogger.logger()
        lg.setLevel(RDLogger.CRITICAL)  # silence per-molecule RDKit warnings
        if total_lines is None:
            total_lines = self._file_len(file_path)
        counter = Counter()
        pbar = tqdm.tqdm(total=total_lines)
        pool = Pool(num_workers)
        res = []
        batch = 50000
        callback = lambda a: pbar.update(batch)
        # Dispatch one counting task per 50k-line slice of the file.
        for i in range(int(total_lines / batch + 1)):
            start = int(batch * i)
            end = min(total_lines, batch * (i + 1))
            res.append(pool.apply_async(MolVocab.read_smiles_from_file,
                                        args=(file_path, start, end, vocab_type,),
                                        callback=callback))
        pool.close()
        pool.join()
        for r in res:
            counter.update(r.get())
        return counter
    @staticmethod
    def _file_len(fname):
        """Number of lines in *fname* (1 for an empty file, as before)."""
        f_len = 0
        with open(fname) as f:
            for f_len, _ in enumerate(f):
                pass
        return f_len + 1
    @staticmethod
    def read_smiles_from_file(file_path, start, end, vocab_type):
        """Count context words for SMILES on lines [start, end) of the file.

        The first line of the file is assumed to be a header and skipped.
        """
        sub_counter = Counter()
        # Context manager so worker processes do not leak the file handle
        # (the original never closed it).
        with open(file_path, "r") as smiles:
            smiles.readline()  # skip header
            for i, smi in enumerate(smiles):
                if i < start:
                    continue
                if i >= end:
                    break
                mol = Chem.MolFromSmiles(smi)
                if vocab_type == 'atom':
                    for atom in mol.GetAtoms():
                        sub_counter[atom_to_vocab(mol, atom)] += 1
                else:
                    for bond in mol.GetBonds():
                        sub_counter[bond_to_vocab(mol, bond)] += 1
        return sub_counter
    @staticmethod
    def load_vocab(vocab_path: str) -> 'MolVocab':
        """Unpickle a MolVocab from *vocab_path*."""
        with open(vocab_path, "rb") as f:
            return pickle.load(f)
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/__init__.py | .py | 427 | 8 | from grover.data.molfeaturegenerator import get_available_features_generators, get_features_generator
from grover.data.molgraph import BatchMolGraph, get_atom_fdim, get_bond_fdim, mol2graph
from grover.data.molgraph import MolGraph, BatchMolGraph, MolCollator
from grover.data.moldataset import MoleculeDataset, MoleculeDatapoint
from grover.data.scaler import StandardScaler
# from .utils import load_features, save_features
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/task_labels.py | .py | 4,585 | 117 | """
The label generator for the pretraining.
"""
from collections import Counter
from typing import Callable, Union
import numpy as np
from rdkit import Chem
from descriptastorus.descriptors import rdDescriptors
from grover.data.molfeaturegenerator import register_features_generator
Molecule = Union[str, Chem.Mol]
FeaturesGenerator = Callable[[Molecule], np.ndarray]
# The functional group descriptors in RDkit.
RDKIT_PROPS = ['fr_Al_COO', 'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN',
               'fr_Ar_COO', 'fr_Ar_N', 'fr_Ar_NH', 'fr_Ar_OH', 'fr_COO', 'fr_COO2',
               'fr_C_O', 'fr_C_O_noCOO', 'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0',
               'fr_NH1', 'fr_NH2', 'fr_N_O', 'fr_Ndealkylation1', 'fr_Ndealkylation2',
               'fr_Nhpyrrole', 'fr_SH', 'fr_aldehyde', 'fr_alkyl_carbamate', 'fr_alkyl_halide',
               'fr_allylic_oxid', 'fr_amide', 'fr_amidine', 'fr_aniline', 'fr_aryl_methyl',
               'fr_azide', 'fr_azo', 'fr_barbitur', 'fr_benzene', 'fr_benzodiazepine',
               'fr_bicyclic', 'fr_diazo', 'fr_dihydropyridine', 'fr_epoxide', 'fr_ester',
               'fr_ether', 'fr_furan', 'fr_guanido', 'fr_halogen', 'fr_hdrzine', 'fr_hdrzone',
               'fr_imidazole', 'fr_imide', 'fr_isocyan', 'fr_isothiocyan', 'fr_ketone',
               'fr_ketone_Topliss', 'fr_lactam', 'fr_lactone', 'fr_methoxy', 'fr_morpholine',
               'fr_nitrile', 'fr_nitro', 'fr_nitro_arom', 'fr_nitro_arom_nonortho',
               'fr_nitroso', 'fr_oxazole', 'fr_oxime', 'fr_para_hydroxylation', 'fr_phenol',
               'fr_phenol_noOrthoHbond', 'fr_phos_acid', 'fr_phos_ester', 'fr_piperdine',
               'fr_piperzine', 'fr_priamide', 'fr_prisulfonamd', 'fr_pyridine', 'fr_quatN',
               'fr_sulfide', 'fr_sulfonamd', 'fr_sulfone', 'fr_term_acetylene', 'fr_tetrazole',
               'fr_thiazole', 'fr_thiocyan', 'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea']
# Bond attributes used to build a bond's vocabulary word (each name X is
# read via the bond's GetX() accessor in get_bond_feature_name).  The
# commented variants below are alternative feature sets kept for reference.
BOND_FEATURES = ['BondType', 'Stereo', 'BondDir']
# BOND_FEATURES = ['BondType', 'Stereo']
# BOND_FEATURES = ['Stereo']
@register_features_generator('fgtasklabel')
def rdkit_functional_group_label_features_generator(mol: Molecule) -> np.ndarray:
    """
    Generates functional group label for a molecule using RDKit.

    :param mol: A molecule (i.e. either a SMILES string or an RDKit molecule).
    :return: A 1D numpy array with one 0/1 entry per descriptor in
        ``RDKIT_PROPS`` (1 wherever the raw descriptor value is non-zero).
    """
    if type(mol) != str:
        smiles = Chem.MolToSmiles(mol, isomericSmiles=True)
    else:
        smiles = mol
    descriptor_generator = rdDescriptors.RDKit2D(RDKIT_PROPS)
    # Drop the first entry of process()'s output, which is not part of the
    # label vector (the original skipped it the same way).
    labels = np.array(descriptor_generator.process(smiles)[1:])
    labels[labels != 0] = 1  # binarize the descriptor values
    return labels
def atom_to_vocab(mol, atom):
    """
    Convert an atom to a vocabulary word based on its element and the types
    of the bonds to its neighbors.

    :param mol: the molecule the atom belongs to.
    :param atom: the target atom.
    :return: the generated atom vocabulary word with its contexts.
    """
    neighborhood = Counter()
    for neighbor in atom.GetNeighbors():
        bond = mol.GetBondBetweenAtoms(atom.GetIdx(), neighbor.GetIdx())
        neighborhood["%s-%s" % (neighbor.GetSymbol(), bond.GetBondType())] += 1
    # Word: own symbol, then each "symbol-bondtype" context with its count,
    # in alphabetical key order so the word is deterministic.
    word = atom.GetSymbol()
    for key in sorted(neighborhood):
        word = "%s_%s%d" % (word, key, neighborhood[key])
    return word
def bond_to_vocab(mol, bond):
    """
    Convert a bond to a vocabulary word based on its own features and the
    bonds from its two endpoint atoms to their one-hop neighbors.

    :param mol: the molecule the bond belongs to.
    :param bond: the target bond.
    :return: the generated bond vocabulary word with its contexts.
    """
    context = Counter()
    endpoints = (bond.GetBeginAtom(), bond.GetEndAtom())
    endpoint_ids = [atom.GetIdx() for atom in endpoints]
    for endpoint in endpoints:
        for neighbor in endpoint.GetNeighbors():
            neighbor_id = neighbor.GetIdx()
            # Skip the two atoms of the target bond itself.
            if neighbor_id in endpoint_ids:
                continue
            outer_bond = mol.GetBondBetweenAtoms(endpoint.GetIdx(), neighbor_id)
            context['%s-%s' % (endpoint.GetSymbol(), get_bond_feature_name(outer_bond))] += 1
    # Word: own features, then each context with its count, in
    # alphabetical key order so the word is deterministic.
    word = get_bond_feature_name(bond)
    for key in sorted(context):
        word = "%s_%s%d" % (word, key, context[key])
    return word
def get_bond_feature_name(bond):
    """
    Return the string format of bond features.

    Bond features are surrounded with (); one value per entry of the
    module-level ``BOND_FEATURES`` list, joined with '-'.

    :param bond: an RDKit bond.
    :return: the formatted feature string.
    """
    ret = []
    for bond_feature in BOND_FEATURES:
        # getattr instead of eval: identical behavior (calls the bond's
        # Get<feature>() accessor) without dynamic code execution.
        fea = getattr(bond, 'Get' + bond_feature)()
        ret.append(str(fea))
    return '(' + '-'.join(ret) + ')'
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/molfeaturegenerator.py | .py | 5,497 | 147 | """
The registered feature generator for molecules.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/features/features_generators.py
"""
from typing import Callable, List, Union
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
Molecule = Union[str, Chem.Mol]
FeaturesGenerator = Callable[[Molecule], np.ndarray]
FEATURES_GENERATOR_REGISTRY = {}
def register_features_generator(features_generator_name: str) -> Callable[[FeaturesGenerator], FeaturesGenerator]:
    """
    Registers a features generator.

    :param features_generator_name: The name to call the FeaturesGenerator.
    :return: A decorator which will add a FeaturesGenerator to the registry using the specified name.
    """
    def _register(generator: FeaturesGenerator) -> FeaturesGenerator:
        # Record the generator under the requested name and hand it back
        # unchanged, so the decorated function stays directly callable.
        FEATURES_GENERATOR_REGISTRY[features_generator_name] = generator
        return generator
    return _register
def get_features_generator(features_generator_name: str) -> FeaturesGenerator:
    """
    Gets a registered FeaturesGenerator by name.

    :param features_generator_name: The name of the FeaturesGenerator.
    :return: The desired FeaturesGenerator.
    :raises ValueError: if no generator was registered under that name.
    """
    generator = FEATURES_GENERATOR_REGISTRY.get(features_generator_name)
    if generator is None:
        raise ValueError(f'Features generator "{features_generator_name}" could not be found. '
                         f'If this generator relies on rdkit features, you may need to install descriptastorus.')
    return generator
def get_available_features_generators() -> List[str]:
    """Returns the names of available features generators."""
    return [name for name in FEATURES_GENERATOR_REGISTRY]
# Default Morgan fingerprint parameters used by the generators below.
MORGAN_RADIUS = 2
MORGAN_NUM_BITS = 2048
@register_features_generator('morgan')
def morgan_binary_features_generator(mol: Molecule,
                                     radius: int = MORGAN_RADIUS,
                                     num_bits: int = MORGAN_NUM_BITS) -> np.ndarray:
    """
    Generates a binary Morgan fingerprint for a molecule.

    :param mol: A molecule (i.e. either a SMILES string or an RDKit molecule).
    :param radius: Morgan fingerprint radius.
    :param num_bits: Number of bits in Morgan fingerprint.
    :return: A 1-D numpy array containing the binary Morgan fingerprint.
    """
    if type(mol) == str:
        mol = Chem.MolFromSmiles(mol)
    fingerprint = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=num_bits)
    features = np.zeros((1,))
    # NOTE: rdkit writes the bit vector into `features` in place.
    DataStructs.ConvertToNumpyArray(fingerprint, features)
    return features
@register_features_generator('morgan_count')
def morgan_counts_features_generator(mol: Molecule,
                                     radius: int = MORGAN_RADIUS,
                                     num_bits: int = MORGAN_NUM_BITS) -> np.ndarray:
    """
    Generates a counts-based Morgan fingerprint for a molecule.

    :param mol: A molecule (i.e. either a SMILES string or an RDKit molecule).
    :param radius: Morgan fingerprint radius.
    :param num_bits: Number of bits in Morgan fingerprint.
    :return: A 1D numpy array containing the counts-based Morgan fingerprint.
    """
    if type(mol) == str:
        mol = Chem.MolFromSmiles(mol)
    fingerprint = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=num_bits)
    features = np.zeros((1,))
    # NOTE: rdkit writes the hashed counts into `features` in place.
    DataStructs.ConvertToNumpyArray(fingerprint, features)
    return features
try:
    # descriptastorus is optional: when it is missing, the two rdkit_2d
    # generators below are simply not registered.
    from descriptastorus.descriptors import rdDescriptors, rdNormalizedDescriptors
    @register_features_generator('rdkit_2d')
    def rdkit_2d_features_generator(mol: Molecule) -> np.ndarray:
        """
        Generates RDKit 2D features for a molecule.

        :param mol: A molecule (i.e. either a SMILES string or an RDKit molecule).
        :return: A 1D numpy array containing the RDKit 2D features.
        """
        smiles = Chem.MolToSmiles(mol, isomericSmiles=True) if type(mol) != str else mol
        generator = rdDescriptors.RDKit2D()
        # First element of process() output is skipped (not a feature value).
        features = generator.process(smiles)[1:]
        return features
    @register_features_generator('rdkit_2d_normalized')
    def rdkit_2d_features_normalized_generator(mol: Molecule) -> np.ndarray:
        """
        Generates RDKit 2D normalized features for a molecule.

        :param mol: A molecule (i.e. either a SMILES string or an RDKit molecule).
        :return: A 1D numpy array containing the RDKit 2D normalized features.
        """
        smiles = Chem.MolToSmiles(mol, isomericSmiles=True) if type(mol) != str else mol
        generator = rdNormalizedDescriptors.RDKit2DNormalized()
        # First element of process() output is skipped (not a feature value).
        features = generator.process(smiles)[1:]
        return features
except ImportError:
    # Optional dependency not installed: skip the rdkit_2d generators.
    pass
"""
Custom features generator template.
Note: The name you use to register the features generator is the name
you will specify on the command line when using the --features_generator <name> flag.
Ex. python train.py ... --features_generator custom ...
@register_features_generator('custom')
def custom_features_generator(mol: Molecule) -> np.ndarray:
# If you want to use the SMILES string
smiles = Chem.MolToSmiles(mol, isomericSmiles=True) if type(mol) != str else mol
# If you want to use the RDKit molecule
mol = Chem.MolFromSmiles(mol) if type(mol) == str else mol
# Replace this with code which generates features from the molecule
features = np.array([0, 0, 1])
return features
"""
| Python |
3D | antecede/EZSpecificity | other_softwares/grover_software/grover/data/scaler.py | .py | 2,854 | 71 | """
The scaler for the regression task.
This implementation is adapted from
https://github.com/chemprop/chemprop/blob/master/chemprop/data/scaler.py
"""
from typing import Any, List
import numpy as np
class StandardScaler:
    """A StandardScaler normalizes a dataset.

    When fit on a dataset, the StandardScaler learns the mean and standard deviation across the 0th axis.
    When transforming a dataset, the StandardScaler subtracts the means and divides by the standard deviations.
    """
    def __init__(self, means: np.ndarray = None, stds: np.ndarray = None, replace_nan_token: Any = None):
        """
        Initialize StandardScaler, optionally with means and standard deviations precomputed.

        :param means: An optional 1D numpy array of precomputed means.
        :param stds: An optional 1D numpy array of precomputed standard deviations.
        :param replace_nan_token: The token to use in place of nans.
        """
        self.means = means
        self.stds = stds
        self.replace_nan_token = replace_nan_token
    def fit(self, X: List[List[float]]) -> 'StandardScaler':
        """
        Learn per-column means and standard deviations (nan-aware).

        :param X: A list of lists of floats.
        :return: The fitted StandardScaler (self, for chaining).
        """
        data = np.array(X).astype(float)
        means = np.nanmean(data, axis=0)
        stds = np.nanstd(data, axis=0)
        # All-nan columns get mean 0 / std 1; zero-variance columns get
        # std 1 so transform never divides by zero.
        means[np.isnan(means)] = 0.0
        stds[np.isnan(stds)] = 1.0
        stds[stds == 0] = 1.0
        self.means = means
        self.stds = stds
        return self
    def transform(self, X: List[List[float]]):
        """
        Standardize *X*; any nan in the result becomes ``replace_nan_token``.

        :param X: A list of lists of floats.
        :return: The transformed data.
        """
        data = np.array(X).astype(float)
        scaled = (data - self.means) / self.stds
        return np.where(np.isnan(scaled), self.replace_nan_token, scaled)
    def inverse_transform(self, X: List[List[float]]):
        """
        Undo :meth:`transform`; any nan in the result becomes ``replace_nan_token``.

        :param X: A list of lists of floats.
        :return: The inverse transformed data.
        """
        if isinstance(X, (np.ndarray, list)):
            X = np.array(X).astype(float)
        restored = X * self.stds + self.means
        return np.where(np.isnan(restored), self.replace_nan_token, restored)
| Python |
3D | antecede/EZSpecificity | Models/cpi.py | .py | 8,567 | 233 | import logging
import torch
import numpy as np
import torch.nn as nn
import pytorch_lightning as pl
class CPI(pl.LightningModule):
    """Feed-forward compound-protein interaction classifier.

    Fuses a chemical ("reaction") embedding with a protein embedding —
    either by concatenation or by an element-wise product ("dot") — then
    applies an MLP head with one logit per task, trained with
    BCE-with-logits.  Validation/test epochs log ROC-AUC and AUPR both
    overall and per sample tag.
    """
    def __init__(
        self,
        config,
        **kwargs,
    ):
        """Build the network from ``config.model``.

        Args:
            config: experiment config; ``config.model`` provides sizes /
                dropout / interaction type, ``config.training`` the
                optimizer and scheduler settings.
            **kwargs: forwarded to ``pl.LightningModule``.
        """
        super(CPI, self).__init__(**kwargs)
        self.config = config
        # Best validation metrics observed so far (updated in get_auc_aupr).
        self.best_val_auc = 0
        self.best_val_aupr = 0
        # Filled by cutomized_epoch_end with the latest epoch's outputs.
        self.logits = None
        self.labels = None
        layers = config.model.layers
        hidden_size = config.model.hidden_size
        model_dropout = config.model.model_dropout
        num_tasks = config.model.num_tasks
        prot_emb_size = config.model.prot_emb_size
        chem_emb_size = config.model.chem_emb_size
        interaction_layer = config.model.interaction_layer
        self.interaction_layer = interaction_layer
        if interaction_layer == "concat":
            # Single projection applied to the concatenated embeddings.
            in_features = prot_emb_size + chem_emb_size
            self.input_layer = nn.Linear(
                in_features=in_features, out_features=hidden_size, bias=True
            )
        elif interaction_layer == "dot":
            # Separate projections so both embeddings land in the same
            # hidden space before the element-wise product in forward().
            self.input_layer_prot = nn.Linear(
                in_features=prot_emb_size, out_features=hidden_size, bias=True
            )
            self.input_layer_chem = nn.Linear(
                in_features=chem_emb_size, out_features=hidden_size, bias=True
            )
        else:
            raise ValueError(f"Unexpected value {interaction_layer}")
        ## Middle layers
        sequential_layers = []
        for i in range(layers - 1):
            # Add activation before each middle layer s.t. we can use this for
            # standard logistic regression without any activation
            sequential_layers.append(torch.nn.ReLU())
            new_layer = nn.Linear(
                in_features=hidden_size, out_features=hidden_size, bias=True
            )
            sequential_layers.append(new_layer)
            sequential_layers.append(nn.Dropout(p=model_dropout))
        self.inner_layers = nn.Sequential(*sequential_layers)
        # Have multiple outputs
        self.num_tasks = num_tasks
        self.out_features = self.num_tasks
        self.output_layer = nn.Sequential(
            torch.nn.ReLU(),
            nn.Linear(
                in_features=hidden_size, out_features=self.out_features, bias=True
            ),
        )
    def forward(self, batch, **kwargs):
        """Forward pass over a (rxn_features, prot_features) pair; returns
        raw logits of shape (batch, num_tasks)."""
        rxn_features, prot_features = batch
        feature_ar = []
        if self.interaction_layer == "concat":
            feature_ar.append(rxn_features)
            feature_ar.append(prot_features)
            # Concatenate the features then run module
            in_features = torch.cat(feature_ar, 1)
            output = self.input_layer(in_features)
        elif self.interaction_layer == "dot":
            feature_ar.append(prot_features)
            if len(feature_ar) <= 0:
                raise RuntimeError("Expected prot or ec features for ffn network")
            # Concatenate the features then run module
            rxn_repr = rxn_features
            protein_repr = torch.cat(feature_ar, 1)
            protein_repr = self.input_layer_prot(protein_repr)
            rxn_repr = self.input_layer_chem(rxn_repr)
            # Take element wise product between representations!
            output = torch.einsum("bd,bd->bd", protein_repr, rxn_repr)
        else:
            raise ValueError(f"Bad interaction layer value: {self.interaction_layer}")
        output = self.inner_layers(output)
        output = self.output_layer(output)
        return output
    def get_loss(self, label, logits):
        # Flattened BCE over every (sample, task) logit.
        loss = nn.BCEWithLogitsLoss()(logits.ravel(), label.ravel())
        return loss
    def training_step(self, batch, batch_idx):
        # Lightning hook: one optimization step's loss.
        reaction = batch.x
        enzyme = batch.enzyme
        label = batch.label
        loss = self.get_loss(label, self((reaction, enzyme)))
        return loss
    def evaluate(self, batch, stage=None):
        # Shared val/test step: return detached CPU predictions, labels and
        # per-sample tags for epoch-end metric aggregation.
        reaction = batch.x
        enzyme = batch.enzyme
        label = batch.label
        tag = batch.tag
        prediction = self((reaction, enzyme))
        return prediction.detach().cpu(), label.detach().cpu(), tag.detach().cpu()
    def test_step(self, batch, batch_idx):
        return self.evaluate(batch, 'test')
    def validation_step(self, batch, batch_idx):
        return self.evaluate(batch, 'val')
    def get_auc_aupr(self, logits, label, name, stage):
        """Log and return ROC-AUC and AUPR for flattened logits/labels.

        ``name`` (a tag id, or None for the overall metric) is embedded in
        the logged metric key.  For the overall validation metric the
        best-so-far values are also tracked and logged.
        """
        # logits: [B]
        # label: [B]
        from sklearn import metrics
        logits = logits.ravel()
        label = label.ravel()
        fpr, tpr, thresholds = metrics.roc_curve(label, logits, pos_label=1)
        # precision, recall, thresholds = metrics.precision_recall_curve(label, logits)
        # NOTE(review): AUC/AUPR are computed twice (once for logging, once
        # for the return values below); kept as-is to preserve behavior.
        if name is not None:
            self.log(f"{stage}_{name}_auc", metrics.auc(fpr, tpr))
            self.log(f"{stage}_{name}_aupr", metrics.average_precision_score(label, logits))
        else:
            self.log(f"{stage}_auc", metrics.auc(fpr, tpr))
            self.log(f"{stage}_aupr", metrics.average_precision_score(label, logits))
        auc = metrics.auc(fpr, tpr)
        aupr = metrics.average_precision_score(label, logits)
        if stage == 'val' and name is None:
            # Track the best overall validation AUC (AUPR follows the same
            # epoch, even if a different epoch had a higher AUPR).
            if auc > self.best_val_auc:
                self.best_val_auc = auc
                self.best_val_aupr = aupr
            self.log(f"best_{stage}_auc", self.best_val_auc)
            self.log(f"best_{stage}_aupr", self.best_val_aupr)
        return auc, aupr
    def cutomized_epoch_end(self, outputs, stage):
        """Aggregate step outputs and log overall and per-tag metrics.

        (Method name typo "cutomized" is kept; it is referenced below.)
        """
        from collections import defaultdict
        logits = np.concatenate([a[0] for a in outputs], axis=0)
        labels = np.concatenate([a[1] for a in outputs], axis=0)
        tags = np.concatenate([a[2] for a in outputs], axis=0)
        self.labels = labels
        self.logits = logits
        # Overall metrics first (name=None also updates best-so-far on val).
        self.get_auc_aupr(logits, labels, None, stage)
        logits_dict = defaultdict(list)
        labels_dict = defaultdict(list)
        # Group predictions by per-sample tag for per-tag metrics.
        # for i in range(len(outputs[0][2])):
        for index, (label, tag) in enumerate(zip(labels, tags)):
            # if label == 1 and tag != 5:
            #     for i in range(5):
            #         logits_dict[i].append(self.logits[index])
            #         labels_dict[i].append(self.labels[index])
            # else:
            logits_dict[tag].append(self.logits[index])
            labels_dict[tag].append(self.labels[index])
        for key, item in logits_dict.items():
            logits = np.array(item)
            labels = np.array(labels_dict[key])
            self.get_auc_aupr(logits, labels, key, stage)
        self.logits_dict = logits_dict
        self.labels_dict = labels_dict
    def validation_epoch_end(self, outputs):
        self.cutomized_epoch_end(outputs, 'val')
    def test_epoch_end(self, outputs):
        self.cutomized_epoch_end(outputs, 'test')
    def configure_optimizers(self):
        """Create the optimizer and a ReduceLROnPlateau scheduler that
        monitors ``val_auc`` (maximization)."""
        from warmup_scheduler import GradualWarmupScheduler
        # NOTE(review): an unknown optimizer name leaves `optimizer`
        # undefined and raises NameError below; "rmsport" is the config
        # key actually used for RMSprop.
        if self.config.training.optimizer == "adam":
            self.optimizer = optimizer = torch.optim.Adam(self.parameters(), lr=self.config.training.lr)
        elif self.config.training.optimizer == "sgd":
            self.optimizer = optimizer = torch.optim.SGD(self.parameters(), lr=self.config.training.lr)
        elif self.config.training.optimizer == "rmsport":
            self.optimizer = optimizer = torch.optim.RMSprop(self.parameters(), lr=self.config.training.lr)
        second_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           factor=self.config.training.sched_factor,
                                                           patience=self.config.training.sched_patience,
                                                           min_lr=self.config.training.min_lr, mode='max'
                                                           )
        # first_scheduler = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=10, after_scheduler=second_scheduler)
        # print(first_scheduler.get_lr())
        lr_scheduler = {
            "scheduler": second_scheduler,
            'interval': 'epoch',
            'frequency': self.config.training.val_frequency,
            'strict': True,
            "monitor": "val_auc"
        }
        return [optimizer], [lr_scheduler]
3D | antecede/EZSpecificity | Models/common.py | .py | 11,502 | 348 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss
from torch_scatter import scatter_mean, scatter_add
from math import pi as PI
def split_tensor_by_batch(x, batch, num_graphs=None):
    """
    Split a flat node tensor into one tensor per graph.

    Args:
        x: (N, ...)
        batch: (N, ) graph index of each row of ``x``.
        num_graphs: number of graphs; inferred from ``batch`` when None.
    Returns:
        [(N_1, ...), (N_2, ...) ..., (N_B, ...)]
    """
    if num_graphs is None:
        num_graphs = batch.max().item() + 1
    return [x[batch == graph_id] for graph_id in range(num_graphs)]
def concat_tensors_to_batch(x_split):
    """Inverse of ``split_tensor_by_batch``.

    Args:
        x_split: list of per-graph tensors.
    Returns:
        (x, batch): concatenated rows and the graph index of each row,
        with ``batch`` placed on the same device as ``x``.
    """
    lengths = torch.LongTensor([segment.size(0) for segment in x_split])
    x = torch.cat(x_split, dim=0)
    batch = torch.repeat_interleave(torch.arange(len(x_split)), repeats=lengths)
    return x, batch.to(device=x.device)
def split_tensor_to_segments(x, segsize):
    """Chop ``x`` along dim 0 into segments of at most ``segsize`` rows."""
    num_segs = math.ceil(x.size(0) / segsize)
    return [x[seg * segsize: (seg + 1) * segsize] for seg in range(num_segs)]
def split_tensor_by_lengths(x, lengths):
    """Split ``x`` along dim 0 into consecutive pieces of the given lengths."""
    segs = []
    offset = 0
    for length in lengths:
        segs.append(x[offset: offset + length])
        offset += length
    return segs
def batch_intersection_mask(batch, batch_filter):
    """Boolean mask over ``batch`` marking entries whose graph id occurs
    anywhere in ``batch_filter``."""
    wanted = batch_filter.unique()
    return (batch.view(-1, 1) == wanted.view(1, -1)).any(dim=1)
class MeanReadout(nn.Module):
    """Mean readout operator over graphs with variadic sizes."""
    def forward(self, input, batch, num_graphs):
        """
        Average node representations per graph.

        Parameters:
            input (Tensor): node representations
            batch (Tensor): graph index per node
            num_graphs (int): number of graphs in the batch
        Returns:
            Tensor: graph representations
        """
        return scatter_mean(input, batch, dim=0, dim_size=num_graphs)
class SumReadout(nn.Module):
    """Sum readout operator over graphs with variadic sizes."""
    def forward(self, input, batch, num_graphs):
        """
        Sum node representations per graph.

        Parameters:
            input (Tensor): node representations
            batch (Tensor): graph index per node
            num_graphs (int): number of graphs in the batch
        Returns:
            Tensor: graph representations
        """
        return scatter_add(input, batch, dim=0, dim_size=num_graphs)
class MultiLayerPerceptron(nn.Module):
    """
    Multi-layer Perceptron.

    Note there is no activation or dropout in the last layer.

    Parameters:
        input_dim (int): input dimension
        hidden_dims (list of int): hidden dimensions
        activation (str or function, optional): activation function; a
            string is looked up in ``torch.nn.functional``, a callable is
            used directly
        dropout (float, optional): dropout rate
    """
    def __init__(self, input_dim, hidden_dims, activation="relu", dropout=0):
        super(MultiLayerPerceptron, self).__init__()
        self.dims = [input_dim] + hidden_dims
        if isinstance(activation, str):
            self.activation = getattr(F, activation)
        else:
            # Bug fix: a callable activation used to be silently replaced
            # with None (i.e. dropped); honor it as documented.
            self.activation = activation
        if dropout:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None
        self.layers = nn.ModuleList()
        for i in range(len(self.dims) - 1):
            self.layers.append(nn.Linear(self.dims[i], self.dims[i + 1]))
    def forward(self, input):
        """Apply the MLP; activation/dropout follow every layer but the last."""
        x = input
        last = len(self.layers) - 1
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i < last:
                if self.activation:
                    x = self.activation(x)
                if self.dropout:
                    x = self.dropout(x)
        return x
class Swish(nn.Module):
    """Swish activation ``x * sigmoid(beta * x)`` with a learnable beta."""
    def __init__(self):
        super(Swish, self).__init__()
        self.beta = nn.Parameter(torch.tensor(1.0))
    def forward(self, x):
        return x * torch.sigmoid(self.beta * x)
# Activation modules selectable by name in MLP's ``act_fn`` argument.
NONLINEARITIES = {
    "tanh": nn.Tanh(),
    "relu": nn.ReLU(),
    "softplus": nn.Softplus(),
    "elu": nn.ELU(),
    "swish": Swish(),
    "silu": nn.SiLU(),
}
class MLP(nn.Module):
    """MLP with the same hidden dim across all layers."""
    def __init__(self, in_dim, out_dim, hidden_dim, num_layer=2, norm='layer', act_fn='relu', act_last=False):
        super().__init__()
        modules = []
        for idx in range(num_layer):
            # First layer maps in_dim -> hidden_dim, last hidden_dim ->
            # out_dim, the rest keep the hidden width.
            if idx == 0:
                modules.append(nn.Linear(in_dim, hidden_dim))
            elif idx == num_layer - 1:
                modules.append(nn.Linear(hidden_dim, out_dim))
            else:
                modules.append(nn.Linear(hidden_dim, hidden_dim))
            # Norm + activation after every layer except the final one
            # (unless act_last is set).
            if idx < num_layer - 1 or act_last:
                if norm == 'layer':
                    modules.append(nn.LayerNorm(hidden_dim))
                elif norm == 'batch':
                    modules.append(nn.BatchNorm1d(hidden_dim))
                modules.append(NONLINEARITIES[act_fn])
        self.net = nn.Sequential(*modules)
    def forward(self, x):
        return self.net(x)
class SmoothCrossEntropyLoss(_WeightedLoss):
    """Cross entropy with optional label smoothing and class weights."""
    def __init__(self, weight=None, reduction='mean', smoothing=0.0):
        super().__init__(weight=weight, reduction=reduction)
        self.smoothing = smoothing
        self.weight = weight
        self.reduction = reduction
    @staticmethod
    def _smooth_one_hot(targets: torch.Tensor, n_classes: int, smoothing=0.0):
        """One-hot encode ``targets``, spreading ``smoothing`` probability
        mass evenly over the non-target classes."""
        assert 0 <= smoothing < 1
        with torch.no_grad():
            smooth = torch.empty(size=(targets.size(0), n_classes),
                                 device=targets.device)
            smooth.fill_(smoothing / (n_classes - 1))
            smooth.scatter_(1, targets.data.unsqueeze(1), 1. - smoothing)
        return smooth
    def forward(self, inputs, targets):
        smoothed = SmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.size(-1),
                                                          self.smoothing)
        log_probs = F.log_softmax(inputs, -1)
        if self.weight is not None:
            log_probs = log_probs * self.weight.unsqueeze(0)
        loss = -(smoothed * log_probs).sum(-1)
        if self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction == 'mean':
            loss = loss.mean()
        return loss
class GaussianSmearing(nn.Module):
    """Expand scalar distances onto a grid of Gaussian radial basis functions."""
    def __init__(self, start=0.0, stop=10.0, num_gaussians=50):
        super().__init__()
        offset = torch.linspace(start, stop, num_gaussians)
        # All Gaussians share one width derived from the grid spacing.
        self.coeff = -0.5 / (offset[1] - offset[0]).item() ** 2
        self.register_buffer('offset', offset)
    def forward(self, dist):
        diff = dist.view(-1, 1) - self.offset.view(1, -1)
        return torch.exp(self.coeff * diff.pow(2))
class SinRBF(nn.Module):
    """Sinusoidal radial basis: ``sin(k_i * d) / d`` for a grid of frequencies
    ``k_i = i * pi / cutoff`` with ``i`` linearly spaced in [start, stop]."""
    def __init__(self, cutoff=10., start=1, stop=20, num_centers=20):
        super().__init__()
        frequencies = (torch.linspace(start, stop, num_centers) * PI) / cutoff
        self.register_buffer('coeff', frequencies)
    @property
    def output_dim(self):
        """Number of basis functions."""
        return len(self.coeff)
    def forward(self, dist):
        d = dist.view(-1, 1)
        return torch.sin(self.coeff.view(1, -1) * d) / d
class ShiftedSoftplus(nn.Module):
    """Softplus shifted down by log(2) so that f(0) == 0."""
    def __init__(self):
        super().__init__()
        self.shift = torch.log(torch.tensor(2.0)).item()
    def forward(self, x):
        return F.softplus(x) - self.shift
def compose_context(h_protein, h_ligand, pos_protein, pos_ligand, batch_protein, batch_ligand):
    """Merge protein and ligand nodes into one context, grouped by graph.

    Returns ``(h_ctx, pos_ctx, batch_ctx, mask_ligand)`` where
    ``mask_ligand`` is True for rows that came from the ligand.

    NOTE: ``argsort`` is not guaranteed stable, so the within-graph order
    of protein vs ligand rows is implementation-defined (as in the
    original implementation).
    """
    batch_ctx = torch.cat([batch_protein, batch_ligand], dim=0)
    sort_idx = batch_ctx.argsort()
    is_ligand = torch.cat([
        torch.zeros([batch_protein.size(0)], device=batch_protein.device).bool(),
        torch.ones([batch_ligand.size(0)], device=batch_ligand.device).bool(),
    ], dim=0)
    mask_ligand = is_ligand[sort_idx]
    batch_ctx = batch_ctx[sort_idx]
    h_ctx = torch.cat([h_protein, h_ligand], dim=0)[sort_idx]        # (N_protein+N_ligand, H)
    pos_ctx = torch.cat([pos_protein, pos_ligand], dim=0)[sort_idx]  # (N_protein+N_ligand, 3)
    return h_ctx, pos_ctx, batch_ctx, mask_ligand
def compose_batch_context(h_protein, h_ligand, num_protein_nodes, num_ligand_nodes):
    """For each sample, concatenate the real protein rows, the real ligand
    rows, then the padding tails of both, and stack the per-sample results."""
    composed = []
    for i, (n_prot, n_lig) in enumerate(zip(num_protein_nodes, num_ligand_nodes)):
        row = torch.cat([
            h_protein[i, :n_prot],
            h_ligand[i, :n_lig],
            h_protein[i, n_prot:],
            h_ligand[i, n_lig:],
        ], dim=0)
        composed.append(row)
    return torch.stack(composed)
def get_complete_graph(batch):
    """
    Build the fully-connected (self-loop-free) edge set of every graph in a batch.

    Args:
        batch: Batch index.
    Returns:
        edge_index: (2, N_1 + N_2 + ... + N_{B-1}), where N_i is the number of nodes of the i-th graph.
        neighbors: (B, ), number of edges per graph.
    """
    # Nodes per graph, and ordered pair counts (n^2 including self-pairs).
    natoms = scatter_add(torch.ones_like(batch), index=batch, dim=0)
    natoms_sqr = (natoms ** 2).long()
    num_atom_pairs = torch.sum(natoms_sqr)
    natoms_expand = torch.repeat_interleave(natoms, natoms_sqr)
    # Offset of each graph's first node in the flat node numbering.
    index_offset = torch.cumsum(natoms, dim=0) - natoms
    index_offset_expand = torch.repeat_interleave(index_offset, natoms_sqr)
    # Offset of each graph's first pair in the flat pair numbering.
    index_sqr_offset = torch.cumsum(natoms_sqr, dim=0) - natoms_sqr
    index_sqr_offset = torch.repeat_interleave(index_sqr_offset, natoms_sqr)
    # Pair rank within its own graph, decoded via div/mod into the two
    # endpoint node indices (then shifted into the global numbering).
    atom_count_sqr = torch.arange(num_atom_pairs, device=num_atom_pairs.device) - index_sqr_offset
    index1 = (atom_count_sqr // natoms_expand).long() + index_offset_expand
    index2 = (atom_count_sqr % natoms_expand).long() + index_offset_expand
    edge_index = torch.cat([index1.view(1, -1), index2.view(1, -1)])
    # Drop self-loops.
    mask = torch.logical_not(index1 == index2)
    edge_index = edge_index[:, mask]
    num_edges = natoms_sqr - natoms  # Number of edges per graph
    return edge_index, num_edges
def compose_context_stable(h_protein, h_ligand, pos_protein, pos_ligand, batch_protein, batch_ligand):
    """Deterministic variant of ``compose_context``: within every graph the
    protein nodes come first, then the ligand nodes.

    Returns ``(h_ctx, pos_ctx, batch_ctx, mask_protein)`` where
    ``mask_protein`` is True for protein rows (note: opposite polarity to
    ``compose_context``'s ligand mask).
    """
    num_graphs = batch_ligand.max().item() + 1
    batch_parts, h_parts, pos_parts, mask_parts = [], [], [], []
    for graph_id in range(num_graphs):
        sel_p = batch_protein == graph_id
        sel_l = batch_ligand == graph_id
        batch_p = batch_protein[sel_p]
        batch_l = batch_ligand[sel_l]
        batch_parts.extend([batch_p, batch_l])
        h_parts.extend([h_protein[sel_p], h_ligand[sel_l]])
        pos_parts.extend([pos_protein[sel_p], pos_ligand[sel_l]])
        mask_parts.extend([
            torch.ones([batch_p.size(0)], device=batch_p.device, dtype=torch.bool),
            torch.zeros([batch_l.size(0)], device=batch_l.device, dtype=torch.bool),
        ])
    return (torch.cat(h_parts, dim=0), torch.cat(pos_parts, dim=0),
            torch.cat(batch_parts, dim=0), torch.cat(mask_parts, dim=0))
if __name__ == '__main__':
    # Smoke test: compose_context_stable must preserve per-graph membership
    # and return the protein/ligand features and positions unchanged.
    h_protein = torch.randn([60, 64])
    h_ligand = -torch.randn([33, 64])
    # Protein positions are clamped non-negative, ligand non-positive, so
    # the two point sets are distinguishable in the asserts below.
    pos_protein = torch.clamp(torch.randn([60, 3]), 0, float('inf'))
    pos_ligand = torch.clamp(torch.randn([33, 3]), float('-inf'), 0)
    batch_protein = torch.LongTensor([0]*10 + [1]*20 + [2]*30)
    batch_ligand = torch.LongTensor([0]*11 + [1]*11 + [2]*11)
    h_ctx, pos_ctx, batch_ctx, mask_protein = compose_context_stable(h_protein, h_ligand, pos_protein, pos_ligand, batch_protein, batch_ligand)
    assert (batch_ctx[mask_protein] == batch_protein).all()
    assert (batch_ctx[torch.logical_not(mask_protein)] == batch_ligand).all()
    assert torch.allclose(h_ctx[torch.logical_not(mask_protein)], h_ligand)
    assert torch.allclose(h_ctx[mask_protein], h_protein)
    assert torch.allclose(pos_ctx[torch.logical_not(mask_protein)], pos_ligand)
    assert torch.allclose(pos_ctx[mask_protein], pos_protein)
3D | antecede/EZSpecificity | Models/ss.py | .py | 13,221 | 284 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from collections import defaultdict
from Models.Structure.structure import Graph
from Models.common import MLP
# Name -> shared activation module, looked up when building layers from
# string config values.
# NOTE(review): Models.common also exposes a NONLINEARITIES table (imported
# elsewhere in this repo); this local copy appears redundant — confirm.
NONLINEARITIES = {
    "tanh": nn.Tanh(),
    "relu": nn.ReLU(),
    "softplus": nn.Softplus(),
    "elu": nn.ELU()
}
class Aggregator(nn.Module):
    """Concatenate a list of feature tensors and project them with an MLP.

    ``forward`` expects a sequence of tensors agreeing on every dimension
    except the last; they are joined along the last axis and fed through a
    configurable MLP.
    """
    def __init__(self, input_dim, output_dim, hidden_dim, num_layer=1, norm='None', act_last=False, act_fn='relu'):
        super().__init__()
        self.mlp = MLP(
            in_dim=input_dim,
            out_dim=output_dim,
            hidden_dim=hidden_dim,
            num_layer=num_layer,
            norm=norm,
            act_last=act_last,
            act_fn=act_fn,
        )
    def forward(self, x):
        joined = torch.cat(x, dim=-1)
        return self.mlp(joined)
class SS(pl.LightningModule):
    """Structure+sequence enzyme-specificity model.

    Fuses per-atom substrate features (optional GNN structure embedding and
    external Grover atom embeddings), a pre-computed 1280-d enzyme residue
    embedding projected to ``hidden_dim``, optional bidirectional
    cross-attention between substrate atoms and enzyme residues, and extra
    molecule-level features (grover_mean, morgan). An MLP header emits one
    specificity logit per sample.
    """
    def __init__(self, config):
        super().__init__()
        # Build the network
        self.config = config
        self.hidden_dim = config.model.hidden_dim
        cross_attention_config = config.model.cross_attention
        graph_config = config.model.graph
        transform_config = config.transform
        header_config = config.model.header
        # component flag
        self.use_gnn = config.model.use_gnn
        self.use_attention = True
        try:
            self.use_attention = cross_attention_config.use_attention
        except:
            # NOTE(review): bare except keeps the default (True) when the
            # config key is absent, but it also hides any other error —
            # confirm this is intended.
            pass
        # sequence_encoder
        ## environment_net
        self.graph_net = Graph(graph_config, transform_config)
        # output: [n_atom * embeding_length (10*128)]
        ## enzyme_net
        self.protein_mlp = nn.Linear(1280, self.hidden_dim)
        # output: [n_amino_acid * embeding_length (300*128)]
        # substarte atom based feature (sequence-based atom features, structure-based atom features, external atom based embedding (grover))
        # config.data.atom_features alternates name/dimension pairs
        # [name0, dim0, name1, dim1, ...]; one projection is built per name.
        self.atom_feature_mlp_dict = nn.ModuleDict(
            {
                self.config.data.atom_features[i-1]: nn.Sequential(
                    nn.Linear(int(self.config.data.atom_features[i]), self.hidden_dim),
                    nn.LayerNorm(self.hidden_dim) if self.config.model.feature_norm else nn.Identity()
                ) for i in range(1, len(self.config.data.atom_features), 2)
            }
        )
        self.atom_feature_aggregator = Aggregator(input_dim=self.hidden_dim*(int(self.config.model.use_gnn) + len(self.config.data.atom_features)//2), output_dim=self.hidden_dim, hidden_dim=self.hidden_dim, num_layer=1)
        # output: [n_atom * embeding_length (10*128)]
        # cross attention (reaction atom based feature, enzyme atom based feature)
        if self.use_attention:
            self.enzyme_attention = nn.MultiheadAttention(embed_dim=self.hidden_dim, num_heads=cross_attention_config. n_head, batch_first=True, dropout=cross_attention_config.dropout)
            self.reaction_attention = nn.MultiheadAttention(embed_dim=self.hidden_dim, num_heads=cross_attention_config.n_head, batch_first=True, dropout=cross_attention_config.dropout)
        # specificity prediction
        # (weighted sum substrate sequence embedding, weighted sum enzyme sequence embedding, avg substrate sequence embedding, avg enzyme sequence embedding, structure embedding, grover embedding (substrate-based), morgan embedding)
        # config.data.features uses the same alternating name/dim layout.
        self.feature_mlp_dict = nn.ModuleDict(
            {
                self.config.data.features[i-1]: nn.Sequential(
                    nn.Linear(int(self.config.data.features[i]), self.hidden_dim),
                    nn.LayerNorm(self.hidden_dim) if self.config.model.feature_norm else nn.Identity()
                ) for i in range(1, len(self.config.data.features), 2)
            }
        )
        self.specificity_header = Aggregator(input_dim=self.hidden_dim*(2 + self.use_attention * 2 + self.config.model.use_gnn + len(self.config.data.features)//2), output_dim=1, hidden_dim=self.hidden_dim, num_layer=header_config.num_layers, norm=header_config.norm, act_fn=header_config.act_fn)
    def _get_graph_output(self, G, B):
        """Run the structure GNN and scatter per-atom outputs into a dense
        [B, 281, hidden_dim] buffer; return (graph embedding, first 280 atom
        slots)."""
        ligand_embedding = torch.zeros(B, 281, self.hidden_dim).to(self.device)
        output, (h, batch, index) = self.graph_net(G)
        ligand_embedding[batch, index, :] = h
        return output, ligand_embedding[:, :280, :]
    def forward(self, G):
        """Return (specificity logits, [dataset tag, structure tag])."""
        # TODO: change to max enzyme length and max reaction length
        x_pro = self.protein_mlp(G.embedding)
        # Enzyme sequences are padded to a fixed length of 1450 residues.
        x_pro = x_pro.view(-1, 1450, self.hidden_dim)
        # get structure embedding (mean and atom embedding)
        # get atom embedding (sequence (x_reaction), structure (structure_x_reaction), grover (grover))
        atom_features = []
        if self.config.model.use_gnn:
            str_mean, x_str = self._get_graph_output(G, x_pro.shape[0])
            atom_features.append(x_str)
        if "grover" in self.config.data.atom_features:
            grover = G.grover.view(-1, 280, int(self.config.data.atom_features[1]))
            grover = self.atom_feature_mlp_dict["grover"](grover)
            atom_features.append(grover)
        x_reaction = self.atom_feature_aggregator(atom_features)
        # start attention
        if self.use_attention:
            # Enzyme residues attend to substrate atoms and vice versa.
            weighted_sum_pro, _ = self.enzyme_attention(x_pro, x_reaction,
                x_reaction, need_weights=True, key_padding_mask=G.reaction_padding_mask)
            weighted_sum_reaction, _ = self.reaction_attention(x_reaction, x_pro,
                x_pro, need_weights=True, key_padding_mask=G.enzyme_padding_mask)
        ## calculate embedding after attention
        ### shape:
        ### x.enzyme_padding_mask: [B, len_pro]
        ### x.reaction_padding_mask: [B, len_reaction]
        ### x_pro: [B, len_pro, len_embed]
        ### x_reaction: [B, len_reaction, len_embed]
        reaction_mask = (~G.reaction_padding_mask).unsqueeze(-1).float()
        enzyme_mask = (~G.enzyme_padding_mask).unsqueeze(-1).float()
        ### reaction_mask: [B, len_reaction, 1]
        ### enzyme_mask: [B, len_pro, 1]
        # Masked mean over the sequence dimension (padding excluded).
        x_reaction = (x_reaction * reaction_mask).sum(dim=1) / reaction_mask.sum(dim=(1,2)).unsqueeze(-1)
        x_pro = (x_pro * enzyme_mask).sum(dim=1) / enzyme_mask.sum(dim=(1,2)).unsqueeze(-1)
        if self.use_attention:
            weighted_sum_reaction = (weighted_sum_reaction * reaction_mask).sum(dim=1) / reaction_mask.sum(dim=(1,2)).unsqueeze(-1)
            weighted_sum_pro = (weighted_sum_pro * enzyme_mask).sum(dim=1) / enzyme_mask.sum(dim=(1,2)).unsqueeze(-1)
        # specificity output
        embeddings = [x_pro, x_reaction]
        if self.use_attention:
            embeddings.extend([weighted_sum_pro, weighted_sum_reaction])
        if self.config.model.use_gnn:
            embeddings.extend([str_mean])
            # embeddings.append(str_mean)
        if "grover_mean" in self.config.data.features:
            embeddings.append(self.feature_mlp_dict["grover_mean"](G.grover_mean))
        if "morgan" in self.config.data.features:
            embeddings.append(self.feature_mlp_dict["morgan"](G.morgan))
        specificity_output = self.specificity_header(embeddings)
        return specificity_output, [G.tag, G.str_tag]
    def get_loss(self, x, logits, stage):
        # Sample-weighted binary cross-entropy on raw logits.
        logits = logits.squeeze(-1)
        loss = (torch.nn.BCEWithLogitsLoss(reduction='none')(logits.ravel(), x.label.ravel()) * x.sample_weight).mean()
        self.log(f"sp_loss/{stage}", loss)
        return loss
    def training_step(self, batch, batch_idx):
        logits, tag = self(batch)
        loss = self.get_loss(batch, logits, "train")
        return loss
    def evaluate(self, x, stage=None):
        # Shared val/test step: log the loss and return (logits, labels, tags)
        # on CPU for epoch-end aggregation.
        sp_logits, tag = self(x)
        self.get_loss(x, sp_logits, stage)
        return sp_logits.detach().cpu(), x.label.detach().cpu(), tag
    def test_step(self, batch, batch_idx):
        return self.evaluate(batch, 'test')
    def validation_step(self, batch, batch_idx):
        return self.evaluate(batch, 'val')
    def _print(self, a):
        # Debug helper: print a tensor/array as one comma-separated line.
        a = a.tolist()
        a = [str(x) for x in a]
        print(",".join(a))
    def get_auc_aupr(self, logits, label, name, stage):
        """Compute, log, print and return (ROC-AUC, average precision) for one
        tag bucket (``name=None`` means the overall metrics)."""
        # logits: [B]
        # label: [B]
        from sklearn import metrics
        logits = logits.ravel()
        label = label.ravel()
        fpr, tpr, thresholds = metrics.roc_curve(label, logits, pos_label=1)
        # precision, recall, thresholds = metrics.precision_recall_curve(label, logits)
        if name is not None:
            self.log(f"auc/{name}/{stage}", metrics.auc(fpr, tpr))
            self.log(f"aupr/{name}/{stage}", metrics.average_precision_score(label, logits))
            print(f"auc/{name}/{stage}", metrics.auc(fpr, tpr))
        else:
            self.log(f"auc/{stage}", metrics.auc(fpr, tpr))
            self.log(f"aupr/{stage}", metrics.average_precision_score(label, logits))
            print(f"auc/{stage}", metrics.auc(fpr, tpr))
        return metrics.auc(fpr, tpr), metrics.average_precision_score(label, logits)
    def cutomized_epoch_end(self, outputs, stage):
        # (Name keeps the original spelling; the *_epoch_end hooks below call
        # it by this name.) Aggregates all step outputs, then reports metrics
        # overall and per tag bucket: tag_0, "tag_0-tag_1", and tag_1.
        sp_logits = np.concatenate([a[0] for a in outputs], axis=0)
        sp_labels = np.concatenate([a[1] for a in outputs], axis=0)
        self.get_auc_aupr(sp_logits, sp_labels, None, stage)
        logits_dict = defaultdict(list)
        labels_dict = defaultdict(list)
        for index, output in enumerate(outputs):
            for logits, label, tag_0, tag_1 in zip(output[0], output[1], output[2][0], output[2][1]):
                tag = (tag_0, tag_1)
                # if label == 1 and tag[0] != "5":
                #     for i in range(5):
                #         logits_dict[str(i)].append(logits)
                #         labels_dict[str(i)].append(label)
                #         logits_dict[str(i) + "-" + tag[1]].append(logits)
                #         labels_dict[str(i) + "-" + tag[1]].append(label)
                # else:
                logits_dict[tag[0]].append(logits)
                labels_dict[tag[0]].append(label)
                logits_dict[tag[0] + "-" + tag[1]].append(logits)
                labels_dict[tag[0] + "-" + tag[1]].append(label)
                logits_dict[tag[1]].append(logits)
                labels_dict[tag[1]].append(label)
        for key, item in logits_dict.items():
            logits = np.array(item)
            labels = np.array(labels_dict[key])
            self.get_auc_aupr(logits, labels, key, stage)
        # Keep the raw per-epoch results around for external inspection.
        self.logits = sp_logits
        self.labels = sp_labels
        self.logits_dict = logits_dict
        self.labels_dict = labels_dict
    def validation_epoch_end(self, outputs):
        self.cutomized_epoch_end(outputs, 'val')
    def test_epoch_end(self, outputs):
        self.cutomized_epoch_end(outputs, 'test')
    def configure_optimizers(self):
        from warmup_scheduler import GradualWarmupScheduler
        # NOTE(review): an unrecognized optimizer name leaves `optimizer`
        # unbound and raises NameError below; "rmsport" is presumably a typo
        # for "rmsprop" but is the key the configs actually use — confirm
        # before changing.
        if self.config.training.optimizer == "adam":
            self.optimizer = optimizer = torch.optim.Adam(self.parameters(), lr=self.config.training.lr)
        elif self.config.training.optimizer == "sgd":
            self.optimizer = optimizer = torch.optim.SGD(self.parameters(), lr=self.config.training.lr)
        elif self.config.training.optimizer == "rmsport":
            self.optimizer = optimizer = torch.optim.RMSprop(self.parameters(), lr=self.config.training.lr)
        elif self.config.training.optimizer == "adamW":
            self.optimizer = optimizer = torch.optim.AdamW(self.parameters(), lr=self.config.training.lr)
        # Plateau scheduler monitors validation AUPR (mode='max').
        second_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
            factor=self.config.training.sched_factor,
            patience=self.config.training.sched_patience,
            min_lr=self.config.training.min_lr, mode='max'
        )
        # first_scheduler = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=10, after_scheduler=second_scheduler)
        # print(first_scheduler.get_lr())
        lr_scheduler = {
            "scheduler": second_scheduler,
            'interval': 'epoch',
            'frequency': self.config.training.val_frequency,
            'strict': True,
            "monitor": "aupr/val"
        }
        return [optimizer], [lr_scheduler]
    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
        optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
        # Linear learning-rate warmup from min_lr to lr over warmup_epochs,
        # applied on top of whatever the scheduler set.
        if epoch < self.config.training.warmup_epochs:
            lr = 1. * epoch / self.config.training.warmup_epochs * (self.config.training.lr - self.config.training.min_lr) + self.config.training.min_lr
            for pg in optimizer.param_groups:
                pg['lr'] = lr
        elif epoch == self.config.training.warmup_epochs:
            for pg in optimizer.param_groups:
                pg['lr'] = self.config.training.lr
        optimizer.step(closure=optimizer_closure)
3D | antecede/EZSpecificity | Models/utils.py | .py | 1,239 | 53 | import sys
root_dir = "/work/yufeng/2022/enzyme_specificity"
sys.path.append(f"{root_dir}/src")
def load_model(config):
    """Instantiate the model class selected by ``config.model.name``.

    Raises:
        ValueError: when the name matches no registered model.
    """
    from Models.dlkcat import DLKcat
    from Models.cpi import CPI
    from Models.ss import SS
    registry = {
        'DLKcat': DLKcat,
        'cpi': CPI,
        'structure_sequence': SS,
    }
    model_cls = registry.get(config.model.name)
    if model_cls is None:
        raise ValueError(f'Unknown model name: {config.model.name}')
    return model_cls(config)
def increase_count(x, a, cnt=1):
    """Add `cnt` to the counter stored under key `a` in dict `x`.

    Creates the entry (starting from 0) when the key is missing.
    Uses dict.get instead of the membership-check-then-index pattern.
    """
    x[a] = x.get(a, 0) + cnt
def get_count(x, a):
    """Return the count stored under key `a` in dict `x`, or 0 when absent.

    Single dict.get lookup instead of a membership check plus index.
    """
    return x.get(a, 0)
def create_fan_chart(datas, save_dir):
    """Render `datas` as a pie chart (one slice per entry, labelled by
    position) using the seaborn pastel palette, and save it to `save_dir`."""
    import matplotlib.pyplot as plt
    import seaborn as sns
    import numpy as np
    plt.clf()
    slice_labels = np.arange(len(datas))
    palette = sns.color_palette('pastel')
    plt.pie(datas, labels=slice_labels, colors=palette, autopct='%0.01f%%')
    plt.savefig(save_dir)
def exists(val):
    """Return True when `val` is anything other than None."""
    return val is not None
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val
def stable_softmax(t, dim=-1):
    """Numerically stable softmax: subtract the max along `dim` before
    exponentiating so large logits cannot overflow."""
    shifted = t - t.amax(dim=dim, keepdim=True)
    return shifted.softmax(dim=dim)
3D | antecede/EZSpecificity | Models/Structure/gnn.py | .py | 1,616 | 44 | from torch_geometric.nn import GATConv
from torch_geometric.nn.conv import PDNConv
import torch_geometric.nn as gnn
import torch.nn as nn
from Models.common import NONLINEARITIES
class GNN(nn.Module):
    """Stack of graph-convolution layers (GAT or PDN) with optional GraphNorm.

    Each conv layer is followed by optional GraphNorm and the activation
    selected by ``config.act_fn`` via the shared NONLINEARITIES table.
    """
    def __init__(self, config, edge_dim):
        super().__init__()
        self.convs = nn.ModuleList()
        self.use_graph_norm = config.use_graph_norm
        self.edge_dim = edge_dim
        if self.use_graph_norm:
            self.batch_norms = nn.ModuleList()
            for i in range(config.num_layers):
                self.batch_norms.append(gnn.norm.GraphNorm(config.hidden_dim))
        for i in range(config.num_layers):
            self.convs.append(self._get_layer(config))
        self.activation = NONLINEARITIES[config.act_fn]
    def _get_layer(self, config):
        """Build one conv layer of the configured type.

        Raises:
            ValueError: for an unrecognized ``config.layer_name``.
        """
        # NOTE(review): GATConv with its default concat=True outputs
        # hidden_dim * heads; the following layers expect hidden_dim inputs —
        # confirm the n_head/concat settings used in practice.
        if config.layer_name == 'GAT':
            return GATConv(config.hidden_dim, config.hidden_dim, heads = config.n_head, dropout = config.dropout, edge_dim = self.edge_dim)
        elif config.layer_name == 'PDN':
            return PDNConv(config.hidden_dim, config.hidden_dim, hidden_channels=config.hidden_dim, dropout = config.dropout, edge_dim = self.edge_dim)
        # Previously this fell through and returned None, which then failed
        # inside nn.ModuleList.append with an opaque TypeError; fail loudly
        # instead (consistent with get_encoder in structure.py).
        raise ValueError(f'Unknown GNN layer name: {config.layer_name}')
    def forward(self, x, edge_index, edge_attr, batch):
        """Run node features through every conv(+norm)+activation stage and
        return the updated node features (same shape as `x`)."""
        # Run everything through the graph convolutions and activations
        for i in range(len(self.convs)):
            x = self.convs[i](x = x, edge_index = edge_index, edge_attr = edge_attr)
            if self.use_graph_norm:
                x = self.batch_norms[i](x, batch=batch)
            x = self.activation(x)
        # pool with respect to batches
        return x
3D | antecede/EZSpecificity | Models/Structure/__init__.py | .py | 44 | 1 | from Models.Structure.structure import Graph | Python |
3D | antecede/EZSpecificity | Models/Structure/structure.py | .py | 3,332 | 84 | import pytorch_lightning as pl
import torch
import torch.nn as nn
from torch_scatter import scatter
from Models.Structure.gnn import GNN
from Models.Structure.egnn import EGNN
import Datasets.Structure.transforms as trans
def get_encoder(config, edge_dim):
    """Build the structure encoder selected by ``config.name``.

    Supports 'gnn' and 'egnn'; any other name raises ValueError.
    """
    if config.name == 'gnn':
        return GNN(config=config, edge_dim=edge_dim)
    if config.name == 'egnn':
        return EGNN(config=config, edge_dim=edge_dim)
    raise ValueError(config.name)
class Graph(nn.Module):
    """Atom-level encoder for the protein-ligand complex.

    Embeds protein and ligand atom features into a shared hidden space and
    runs one or two graph encoders over the complex graph depending on
    ``config.mode`` ('complex', 'sub', or 'both').
    """
    def __init__(self, config, trans_config):
        super().__init__()
        # Build the network
        self.config = config
        self.hidden_dim = config.hidden_dim
        protein_featurizer = trans.FeaturizeProteinAtom()
        ligand_featurizer = trans.FeaturizeLigandAtom()
        # Instantiated only to read its edge feature dimension below.
        add_knn_edge_featurizer = trans.EdgeConnection(dist_noise=trans_config.dist_noise, cutoff=trans_config.cutoff, num_r_gaussian=trans_config.num_r_gaussian, k=trans_config.k)
        self.protein_atom_emb = nn.Linear(protein_featurizer.feature_dim, self.hidden_dim)
        self.ligand_atom_emb = nn.Linear(ligand_featurizer.feature_dim, self.hidden_dim)
        if config.mode == 'complex' or config.mode == 'both':
            self.encoder_complex = get_encoder(config, edge_dim = add_knn_edge_featurizer.feature_dim)
        if config.mode == 'sub' or config.mode == 'both':
            self.encoder_sub = get_encoder(config, edge_dim = add_knn_edge_featurizer.feature_dim)
    def forward(self, G):
        """Return (per-graph mean embedding, (atom features, batch index,
        ligand slot index))."""
        h_protein = self.protein_atom_emb(G.protein_x)
        h_ligand = self.ligand_atom_emb(G.ligand_x)
        # Each atom belongs to exactly one side; the 0/1 masks select it.
        h = h_protein * G.protein_mask[:, None] + h_ligand * G.ligand_mask[:, None]
        batch = G.ligand_index_batch
        if self.config.mode == 'both' or self.config.mode == 'complex':
            h_complex = self.encoder_complex(
                x = h,
                edge_index = G.complex_edge_index,
                edge_attr = G.complex_edge_attr,
                batch = batch
            ) # (N_p+N_l, H)
        if self.config.mode == 'both' or self.config.mode == 'sub':
            # NOTE(review): the 'sub' encoder also receives the full complex
            # edge set rather than ligand-only edges — confirm intended.
            h_sub = self.encoder_sub(
                x = h,
                edge_index = G.complex_edge_index,
                edge_attr = G.complex_edge_attr,
                batch = batch
            )
        if self.config.mode == 'both':
            h = h_complex * G.mask_use_complex[:, None] + h_sub * G.mask_use_ligand[:, None]
        elif self.config.mode == 'complex':
            h = h_complex
        elif self.config.mode == 'sub':
            h = h_sub
        # Aggregate messages
        pre_atom_output = (h, batch, G.ligand_index)
        output = scatter(h, index=batch, dim=0, reduce='mean') # (N, F)
        # output_ligand = scatter(h * G.ligand_mask[:, None], index=batch, dim=0, reduce='sum') / (scatter(G.ligand_mask, index=batch, dim=0, reduce='sum') + 1e-07)[:, None] # (N, F)
        # output_protein = scatter(h * G.protein_mask[:, None], index=batch, dim=0, reduce='sum') / (scatter(G.protein_mask, index=batch, dim=0, reduce='sum') + 1e-07)[:, None] # (N, F)
        # return output, output_ligand, output_protein, pre_atom_output
        return output, pre_atom_output
3D | antecede/EZSpecificity | Models/Structure/egnn.py | .py | 2,858 | 85 | from torch import nn
import torch
from torch_scatter import scatter_sum, scatter_mean
from Models.common import MLP
class EnBaseLayer(nn.Module):
    """One EGNN-style message-passing layer (feature update only, no coords).

    Per-edge messages are computed from the two endpoint features (plus edge
    features when ``edge_feat_dim > 0``), optionally gated by a learned
    sigmoid attention scalar, summed onto the destination node, and combined
    with the node feature through a residual MLP update.
    """
    def __init__(self, hidden_dim, edge_feat_dim, act_fn='relu', norm='None', attention=False, residual=True):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.edge_feat_dim = edge_feat_dim
        self.act_fn = act_fn
        self.norm = norm
        self.attention = attention
        # NOTE(review): `residual` is stored but never consulted — the skip
        # connection in forward() is unconditional; confirm intended.
        self.residual = residual
        self.edge_mlp = MLP(2 * hidden_dim + edge_feat_dim, hidden_dim, hidden_dim,
            num_layer=2, norm=norm, act_fn=act_fn, act_last=True)
        if self.attention:
            self.edge_inf = nn.Sequential(nn.Linear(hidden_dim, 1), nn.Sigmoid())
        self.node_mlp = MLP(2 * hidden_dim, hidden_dim, hidden_dim, num_layer=2, norm=norm, act_fn=act_fn)
    def forward(self, h, edge_index, edge_attr):
        """Update node features.

        Args:
            h: node features, shape [N, hidden_dim]
            edge_index: [2, E] (source, destination) node indices
            edge_attr: per-edge features; used only when edge_feat_dim > 0
        Returns:
            updated node features, shape [N, hidden_dim]
        """
        edge_index = edge_index
        src, dst = edge_index
        if self.edge_feat_dim > 0:
            edge_feat = edge_attr # shape: [#edges_in_batch, #bond_types]
        else:
            edge_feat = None
        hi, hj = h[dst], h[src]
        if edge_feat is None:
            mij = self.edge_mlp(torch.cat([hi, hj], -1))
        else:
            mij = self.edge_mlp(torch.cat([edge_feat, hi, hj], -1))
        if self.attention:
            # Gate each message by a learned scalar in (0, 1).
            eij = mij * self.edge_inf(mij)
        else:
            eij = mij
        # Sum incoming messages per destination node.
        mi = scatter_sum(eij, dst, dim=0, dim_size=h.shape[0])
        h = h + self.node_mlp(torch.cat([mi, h], -1))
        return h
class EGNN(nn.Module):
    """Stack of EnBaseLayer blocks, all at width ``config.hidden_dim``."""
    def __init__(self, config, edge_dim):
        super().__init__()
        # Build the network
        self.num_layers = config.num_layers
        self.hidden_dim = config.hidden_dim
        self.edge_feat_dim = edge_dim
        self.act_fn = config.act_fn
        self.norm = config.norm
        self.attention = config.attention
        self.residual = config.residual
        self.net = self._build_network()
    def _build_network(self):
        # Equivariant layers
        # All layers share the same hyperparameters; held in a ModuleList so
        # parameters are registered.
        layers = []
        for l_idx in range(self.num_layers):
            layer = EnBaseLayer(self.hidden_dim, self.edge_feat_dim, act_fn=self.act_fn, norm=self.norm, attention=self.attention, residual=self.residual)
            layers.append(layer)
        return nn.ModuleList(layers)
    def forward(self, x, edge_index, edge_attr, batch):
        # `batch` is accepted for interface parity with GNN.forward but is
        # not used here.
        edge_index = edge_index
        h = x
        for interaction in self.net:
            # NOTE(review): EnBaseLayer.forward already applies its own
            # residual add, so this outer `h + ...` creates a second skip
            # connection per layer — confirm intended.
            h = h + interaction(h, edge_index, edge_attr)
        return h
3D | antecede/EZSpecificity | Datasets/data_representer.py | .py | 16,075 | 431 | import lmdb
import pickle
from torch_geometric.data import Data
import torch.utils.data as data
import os
import torch
import numpy as np
from Datasets.utils import preprocess_enzyme_feature, preprocess_reaction_feature, torchify_dict, load_tensor, load_pickle, get_paths, check_paths_exist
from Datasets.Structure import StructureDataset
def get_representer(config, df, transform=None, is_train=True):
    """Construct the dataset representer named by ``config.data.representer``.

    Raises:
        ValueError: for an unrecognized representer name.
    """
    name = config.data.representer
    if name == 'graph_esm':
        return Reaction(config=config, df=df)
    if name == 'graph_one-hot':
        return Graph_Onehot(config=config, df=df)
    if name == 'cpi':
        return CPI(config=config, df=df)
    if name == 'structure_sequence':
        return StructureSequence(config=config, df=df, transform=transform, is_train=is_train)
    raise ValueError("The representer is not supported.")
class CPI(data.Dataset):
    """Baseline compound-protein-interaction dataset.

    Each item pairs a Morgan fingerprint of the reaction with the mean of the
    enzyme's residue embeddings, read lazily from per-dataset LMDB / .npy
    stores. Rows are pre-filtered by optional "high quality" id lists.
    """
    def __init__(self, config, df) -> None:
        super().__init__()
        self.config = config
        self.df = df
        # DB handles are opened lazily (per worker) in _connect_db.
        self.enzyme_db = None
        self.reaction_db = None
        self.valid_idxs = []
        self.high_quality_id_dicts = None
        self._get_valid_idx()
        assert check_paths_exist( self.config.data.enzyme_lmdb_path)
        assert check_paths_exist( self.config.data.morgan_path)
    def _connect_db(self):
        # assert self.db is None, 'A connection has already been opened.'
        # Open one read-only LMDB env per dataset, plus the Morgan .npy files.
        if self.enzyme_db is None:
            self.enzyme_db = [lmdb.open(
                path,
                map_size=600*(1024*1024*1024), # 600GB
                create=False,
                subdir=False,
                readonly=True,
                lock=False,
                readahead=False,
                meminit=False,
            ) for path in get_paths(self.config.data.enzyme_lmdb_path)]
            self.reaction_db = [
                np.load(path, allow_pickle=True)
                for path in get_paths(self.config.data.morgan_path)
            ]
    def __len__(self):
        return len(self.valid_idxs)
    def _get_high_quality_id_dicts(self):
        # One id-set per dataset; a missing/unreadable file yields None,
        # which check_high_quality treats as "accept everything".
        self.high_quality_id_dicts = []
        try:
            for path in get_paths(self.config.data.high_quality_id_path):
                try:
                    fin = open(path, "r")
                    self.high_quality_id_dicts.append({int(line.strip()):True for line in fin})
                    fin.close()
                except:
                    self.high_quality_id_dicts.append(None)
        except:
            self.high_quality_id_dicts = [None]
    def check_high_quality(self, dataset_id, index):
        # True when no quality list exists for the dataset, or the structure
        # index appears in it.
        if self.high_quality_id_dicts is None:
            self._get_high_quality_id_dicts()
        return ((self.high_quality_id_dicts[dataset_id] is None) or (index in self.high_quality_id_dicts[dataset_id]))
    def _get_valid_idx(self):
        self.valid_idxs = []
        for index, (dataset_id, structure_index) in enumerate(zip(self.df["dataset_id"].values, self.df["structure_index"].values)):
            if self.check_high_quality(dataset_id, structure_index):
                self.valid_idxs.append(index)
    def __getitem__(self, idx):
        # Map the dataset position to the underlying dataframe row.
        idx = self.valid_idxs[idx]
        reation_idx = self.df.loc[idx, 'reaction']
        enzyme_idx = self.df.loc[idx, 'enzyme']
        dataset_id = self.df.loc[idx, 'dataset_id']
        tag = dataset_id
        # if int(tag) == 0:
        #     ecnumbers = self.df.loc[idx, 'ecnumber'].split(".")
        #     fake_ecnumber = self.df.loc[idx, 'fake_ecnumber'].split(".")
        #     id = 4
        #     for i in range(len(ecnumbers)):
        #         if ecnumbers[i] != fake_ecnumber[i]:
        #             id = i
        #             break
        #     tag = id
        # else:
        #     tag = 5
        if self.enzyme_db is None or self.reaction_db is None:
            self._connect_db()
        reaction_data = self.reaction_db[dataset_id][reation_idx, :][np.newaxis, :]
        with self.enzyme_db[dataset_id].begin(write=False) as txn:
            key = str(enzyme_idx).encode()
            value = txn.get(key)
            if value is None:
                # NOTE(review): the result of drop() is discarded, so the row
                # is never actually removed, and the recursive call re-enters
                # with an already-remapped idx (it will be remapped again via
                # valid_idxs) — this looks like an unbounded-recursion bug;
                # likely intended `self.df = self.df.drop(...)` plus a retry
                # on a different index. Confirm before relying on it.
                self.df.drop([idx]).reset_index(drop=True)
                return self.__getitem__(idx)
            enzyme_data = pickle.loads(value)
        # Item: Morgan fingerprint [1, D], mean enzyme embedding [1, 1280],
        # binary label, and the dataset id as the tag.
        return Data(x = torch.tensor(reaction_data), enzyme = torch.tensor(enzyme_data['embedding'].mean(axis=0)[np.newaxis, :]), label=torch.tensor(self.df.loc[idx, 'label'], dtype=torch.float), tag=torch.tensor(tag, dtype=torch.long))
class Graph_Onehot(data.Dataset):
    """Dataset pairing one-hot compound graphs with encoded protein sequences.

    All tensors are loaded eagerly from ``config.data.input_dir`` at
    construction time; ``__getitem__`` only indexes into them.
    """
    def __init__(self, config, df):
        self.config = config
        self.df = df
        base = config.data.input_dir
        self.compounds = load_tensor(base + '/compounds', torch.LongTensor)
        self.adjacencies = load_tensor(base + '/adjacencies', torch.FloatTensor)
        self.proteins = load_tensor(base + '/proteins', torch.LongTensor)
        self.fingerprint_dict = load_pickle(base + '/fingerprint_dict.pickle')
        self.word_dict = load_pickle(base + '/sequence_dict.pickle')
    def __len__(self):
        return self.df.index.shape[0]
    def __getitem__(self, idx):
        row = self.df.loc[idx]
        return Data(
            x=self.compounds[row['reaction']],
            adj=self.adjacencies[row['reaction']],
            enzyme=self.proteins[row['enzyme']],
            label=torch.tensor(row['label'], dtype=torch.float),
        )
class Reaction(data.Dataset):
    """Sequence-level dataset backed by per-dataset LMDB stores.

    Serves padded reaction (substrate) and enzyme feature dicts, optionally
    joined with Grover atom/molecule embeddings and Morgan fingerprints.
    Rows whose keys are missing from any required store are filtered out
    into ``valid_idx``.
    """
    def __init__(self, df, config=None) -> None:
        super().__init__()
        self.df = df
        self.config = config
        # DB handles and key dictionaries are populated lazily in _connect_db.
        self.enzyme_dbs = None
        self.reaction_dbs = None
        self.grover_dbs = None
        self.morgan_dbs = None
        self.db_key_dict = None
        self.valid_idx = None
        self._get_valid_idx()
        if "grover" in self.config.data.features or "grover_mean" in self.config.data.features:
            assert check_paths_exist(self.config.data.grover_path)
        if "morgan" in self.config.data.features:
            assert check_paths_exist(self.config.data.morgan_path)
        assert check_paths_exist(self.config.data.enzyme_lmdb_path)
        assert check_paths_exist(self.config.data.reaction_lmdb_path)
    def _check_valid_idx(self, idx):
        # A row is valid when its reaction/enzyme (and grover, when used)
        # keys all exist in the corresponding store's key dictionary.
        reation_idx = self.df.loc[idx, 'reaction']
        enzyme_idx = self.df.loc[idx, 'enzyme']
        dataset_id = self.df.loc[idx, 'dataset_id']
        key = str(reation_idx).encode()
        if key not in self.db_key_dict[f"reaction_{dataset_id}"]:
            print(f"reaction_{dataset_id} {reation_idx} not in db")
            return False
        key = str(enzyme_idx).encode()
        if key not in self.db_key_dict[f"enzyme_{dataset_id}"]:
            print(f"enzyme_{dataset_id} {enzyme_idx} not in db")
            return False
        if "grover" in self.config.data.atom_features or "grover_mean" in self.config.data.features:
            key = str(reation_idx).encode()
            if key not in self.db_key_dict[f"grover_{dataset_id}"]:
                print(f"grover_{dataset_id} {reation_idx} not in db")
                return False
        return True
    def _get_valid_idx(self):
        if self.enzyme_dbs is None or self.reaction_dbs is None:
            self._connect_db()
        cnt = 0
        self.valid_idx = []
        for idx in range(self.df.index.shape[0]):
            if self._check_valid_idx(idx):
                self.valid_idx.append(idx)
            else:
                cnt += 1
        print(f"Invalid reaction data: {cnt}")
    def _add_key_dict(self, name, db):
        # Snapshot all keys of an LMDB env into a name -> position dict for
        # fast membership tests without opening transactions per lookup.
        if db is None:
            return
        with db.begin() as txn:
            keys = list(txn.cursor().iternext(values=False))
            self.db_key_dict[name] = {k: i for i, k in enumerate(keys)}
        return
    def _build_key_dict(self):
        if ("grover" in self.config.data.atom_features or "grover_mean" in self.config.data.features):
            for index, db in enumerate(self.grover_dbs):
                self._add_key_dict(f"grover_{index}", db)
        for index, db in enumerate(self.enzyme_dbs):
            self._add_key_dict(f"enzyme_{index}", db)
        for index, db in enumerate(self.reaction_dbs):
            self._add_key_dict(f"reaction_{index}", db)
    def _connect_db(self):
        # assert self.db is None, 'A connection has already been opened.'
        # Open read-only LMDB environments (one per dataset) and load the
        # Morgan fingerprint matrices; then snapshot all key sets.
        if self.grover_dbs is None and ("grover" in self.config.data.atom_features or "grover_mean" in self.config.data.features):
            self.grover_dbs = [lmdb.open(
                path,
                map_size=600*(1024*1024*1024), # 600GB
                create=False,
                subdir=False,
                readonly=True,
                lock=False,
                readahead=False,
                meminit=False,
            ) for path in get_paths(self.config.data.grover_path)]
        if self.morgan_dbs is None and "morgan" in self.config.data.features:
            self.morgan_dbs = [np.load(path).astype(np.float32) for path in get_paths(self.config.data.morgan_path)]
        if self.enzyme_dbs is None:
            self.enzyme_dbs = [lmdb.open(
                path,
                map_size=600*(1024*1024*1024), # 600GB
                create=False,
                subdir=False,
                readonly=True,
                lock=False,
                readahead=False,
                meminit=False,
            ) for path in get_paths(self.config.data.enzyme_lmdb_path)]
        if self.reaction_dbs is None:
            self.reaction_dbs = [lmdb.open(
                path,
                map_size=600*(1024*1024*1024), # 600GB
                create=False,
                subdir=False,
                readonly=True,
                lock=False,
                readahead=False,
                meminit=False,
            ) for path in get_paths(self.config.data.reaction_lmdb_path)]
        self.db_key_dict = {}
        self._build_key_dict()
    def __len__(self):
        # NOTE(review): returns the full dataframe length while __getitem__
        # indexes into valid_idx — when rows were filtered out these disagree
        # and high positions raise IndexError; confirm intended.
        return self.df.index.shape[0]
    def getitem_with_real_idx(self, idx):
        """Build one sample from a raw dataframe index (no valid_idx remap)."""
        reation_idx = self.df.loc[idx, 'reaction']
        enzyme_idx = self.df.loc[idx, 'enzyme']
        dataset_id = self.df.loc[idx, 'dataset_id']
        tag = self.df.loc[idx, 'tag']
        # if int(tag) == 0:
        #     ecnumbers = self.df.loc[idx, 'ecnumber'].split(".")
        #     fake_ecnumber = self.df.loc[idx, 'fake_ecnumber'].split(".")
        #     id = 4
        #     for i in range(len(ecnumbers)):
        #         if ecnumbers[i] != fake_ecnumber[i]:
        #             id = i
        #             break
        #     tag = str(id)
        # else:
        #     tag = str(5)
        tag = str(tag)
        substrate_features = {}
        if self.enzyme_dbs is None or self.reaction_dbs is None:
            self._connect_db()
        with self.reaction_dbs[dataset_id].begin(write=False) as txn:
            key = str(reation_idx).encode()
            value = txn.get(key)
            reaction_data = pickle.loads(value)
        with self.enzyme_dbs[dataset_id].begin(write=False) as txn:
            key = str(enzyme_idx).encode()
            value = txn.get(key)
            enzyme_data = pickle.loads(value)
        if "grover" in self.config.data.atom_features or "grover_mean" in self.config.data.features:
            with self.grover_dbs[dataset_id].begin(write=False) as txn:
                key = str(reation_idx).encode()
                value = txn.get(key)
                grover_data = pickle.loads(value)
                substrate_features["grover_mean"] = torch.from_numpy(grover_data['total_embedding'][np.newaxis, :])
                reaction_data["grover"] = torch.from_numpy(grover_data['embedding'])
        if "morgan" in self.config.data.features:
            substrate_features["morgan"] = torch.from_numpy(self.morgan_dbs[dataset_id][reation_idx][np.newaxis, :])
        # padding
        reaction_data = preprocess_reaction_feature(reaction_data, self.config.data.max_substrate_length)
        enzyme_data = preprocess_enzyme_feature(enzyme_data, self.config.data.max_enzyme_length)
        data = ReactionData.from_protein_ligand_dicts(torchify_dict(reaction_data), torchify_dict(enzyme_data), label=torch.from_numpy(np.array(self.df.loc[idx, 'label'], dtype=float)), tag=tag, **substrate_features)
        return data
    def __getitem__(self, idx):
        if self.valid_idx is None:
            self._get_valid_idx()
        return self.getitem_with_real_idx(self.valid_idx[idx])
class StructureSequence(data.Dataset):
    """Dataset joining sequence-based (Reaction) and structure-based samples.

    An index is usable only when it is valid in BOTH underlying datasets;
    ``__getitem__`` merges the two per-row samples into one
    StructureSequenceData object.
    """
    def __init__(self, df, config, transform, is_train) -> None:
        super().__init__()
        self.df = df
        self.config = config
        # (A dead no-op `transform = transform` self-assignment was removed;
        # the parameter is forwarded to StructureDataset below.)
        self.sequence_db = Reaction(config=config, df=df)
        self.structure_db = StructureDataset(config=config, df=df, transform=transform, is_train=is_train)
        self.valid_idx = None
        self._get_valid_idx()
    def _get_valid_idx(self):
        """Intersect the valid indices of the sequence and structure datasets."""
        valid_idx_sequence = self.sequence_db.valid_idx
        valid_idx_structure = self.structure_db.valid_idx
        print(len(valid_idx_sequence))
        print(len(valid_idx_structure))
        self.valid_idx = list(set(valid_idx_sequence).intersection(set(valid_idx_structure)))
        print(len(self.valid_idx))
    def __len__(self):
        return len(self.valid_idx)
    def update_valid_idx(self, valid_idx):
        # Allow callers (e.g. splitters) to restrict the usable indices.
        self.valid_idx = valid_idx
    def __getitem__(self, idx):
        if self.valid_idx is None:
            self._get_valid_idx()
        idx = self.valid_idx[idx]
        sequence_data = self.sequence_db.getitem_with_real_idx(idx)
        structure_data = self.structure_db.getitem_with_real_idx(idx)
        data = StructureSequenceData.from_sequence_structure(sequence_data, structure_data)
        if data.y is not None and data.label is not None:
            # Sanity check: structure label (y) must agree with sequence label.
            assert data.y == data.label
        # Strip structure-only fields that downstream batching does not need.
        data.active_site = None
        data.y = None
        return data
class ReactionData(Data):
    """torch_geometric Data holding sequence features for one enzyme/reaction
    pair, with batching offsets for its edge indices."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @staticmethod
    def from_protein_ligand_dicts(protein_dict=None, ligand_dict=None, **kwargs):
        """Build a ReactionData from separate protein/ligand feature dicts.

        Every entry of both dicts (and every extra keyword) becomes a field
        on the returned object.
        """
        instance = ReactionData(**kwargs)
        if protein_dict is not None:
            for key, item in protein_dict.items():
                instance[key] = item
        if ligand_dict is not None:
            for key, item in ligand_dict.items():
                instance[key] = item
        for key, item in kwargs.items():
            instance[key] = item
        return instance
    def __inc__(self, key, value, *args, **kwargs):
        # When mini-batching, shift 'edge_index' by this graph's atom count.
        if key == 'edge_index':
            return self['element'].size(0)
        else:
            # Forward the full argument list so torch_geometric signatures
            # that pass extra positional arguments (e.g. the storage object)
            # keep working; this also matches StructureSequenceData.__inc__,
            # which already forwarded *args/**kwargs.
            return super().__inc__(key, value, *args, **kwargs)
class StructureSequenceData(Data):
    """Merged torch_geometric container holding both sequence-sample and
    structure-sample fields, with batching offsets for each edge index."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @staticmethod
    def from_sequence_structure(sequence=None, structure=None, **kwargs):
        """Copy every field of `sequence` and `structure` (plus any extra
        keywords) into one StructureSequenceData object."""
        merged = StructureSequenceData(**kwargs)
        for source in (sequence, structure):
            if source is None:
                continue
            for key, item in source.to_dict().items():
                merged[key] = item
        for key, item in kwargs.items():
            merged[key] = item
        return merged
    def __inc__(self, key, value, *args, **kwargs):
        # Batching offsets: edge indices shift by their node counts; the
        # ligand slot index is per-graph and must not shift.
        if key == 'edge_index':
            return self['element'].size(0)
        if key == 'ligand_index':
            return 0
        if key == 'complex_edge_index':
            return self['ligand_x'].size(0)
        return super().__inc__(key, value, *args, **kwargs)
3D | antecede/EZSpecificity | Datasets/preprocess_full_brenda.ipynb | .ipynb | 21,592 | 530 | {
"cells": [
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from tqdm import tqdm\n",
"import numpy as np\n",
"import sys\n",
"import re\n",
"import os\n",
"\n",
"root_dir = \"/work/yufeng/2022/enzyme_specificity\"\n",
"\n",
"sys.path.append(f\"{root_dir}/src\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Download enzyme active info from brenda\n",
"See Dataset.utils (download_uniprot_file)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create the reaction features\n",
"See Dataset.utils (get_reaction_feature)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"reaction_df = pd.read_csv(f\"{root_dir}/data/full_brenda/reaction copy.csv\", sep=',')\n",
"\n",
"drop_idx = []\n",
"for index, (reaction, substrate) in enumerate(zip(reaction_df['reactions'].values.tolist(), reaction_df['substrates'].values.tolist())):\n",
" if len(substrate) > 275:\n",
" drop_idx.append(index)\n",
"\n",
"reaction_df = reaction_df.drop(drop_idx).reset_index(drop=True)\n",
"reaction_df.to_csv(f\"{root_dir}/data/full_brenda/reaction.csv\", sep=',', index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create the enzyme features\n",
"See Dataset.utils (get_enzyme_feature)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create positive samples dataset"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv(f\"{root_dir}/data/brenda/data.csv\", sep=',').dropna(subset=['left', 'right', 'uniprot', 'ecnumber'])\n",
"enzyme_df = pd.read_csv(f\"{root_dir}/data/full_brenda/enzymes.csv\", sep=',')\n",
"reaction_df = pd.read_csv(f\"{root_dir}/data/full_brenda/reaction.csv\", sep=',')\n",
"\n",
"uniprot_dict = {uniprot: index for index, uniprot in enumerate(enzyme_df['uniprots'].values.tolist())}\n",
"reaction_dict = {}\n",
"\n",
"for index, (reaction, substrate) in enumerate(zip(reaction_df['reactions'].values.tolist(), reaction_df['substrates'].values.tolist())):\n",
" reaction_dict[reaction] = index\n",
"\n",
"data = {\n",
" 'reaction': [],\n",
" 'enzyme': [],\n",
" 'ecnumber': []\n",
"}\n",
"\n",
"for left, right, uniprot, ecnumber in zip(df['left'], df['right'], df['uniprot'], df['ecnumber']):\n",
" if uniprot in uniprot_dict and left + '>>' + right in reaction_dict:\n",
" data['reaction'].append(reaction_dict[left + '>>' + right])\n",
" data['enzyme'].append(uniprot_dict[uniprot])\n",
" data['ecnumber'].append(ecnumber)\n",
"\n",
"data = pd.DataFrame(data).sample(frac=1).reset_index()\n",
"data.to_csv(f\"{root_dir}/data/full_brenda/positive_data.csv\", index=False)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Calculate max_length of reaction"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"135\n"
]
}
],
"source": [
"import lmdb\n",
"import pickle\n",
"\n",
"reaction_save_lmdb_path = f\"{root_dir}/data/full_brenda/reaction_features.lmdb\"\n",
"db = lmdb.open(\n",
" reaction_save_lmdb_path,\n",
" map_size=10*(1024*1024*1024), # 10GB\n",
" create=False,\n",
" subdir=False,\n",
" readonly=True,\n",
" lock=False,\n",
" readahead=False,\n",
" meminit=False,\n",
")\n",
"with db.begin() as txn:\n",
" keys = list(txn.cursor().iternext(values=False))\n",
"\n",
"max_n_atoms = 0\n",
"\n",
"for key in keys:\n",
" with db.begin(write=False, buffers=True) as txn:\n",
" # key = str(key).encode()\n",
" value = txn.get(key)\n",
" if value is None:\n",
" raise KeyError\n",
" data = pickle.loads(value)\n",
" max_n_atoms = max(max_n_atoms, data['element'].shape[0])\n",
" # break\n",
"print(max_n_atoms)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create grover embedding"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"# 1. Tou Tou is going to create smile only csv\n",
"import pandas as pd\n",
"import os\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/full_brenda/reaction.csv\", sep=',')\n",
"results = [smile for smile in df['substrates']]\n",
"\n",
"for smile in df['substrates']:\n",
" if len(smile) < 275:\n",
" results.append(smile)\n",
" \n",
"data = {\n",
" \"substrates\": results\n",
"}\n",
"data = pd.DataFrame(data)\n",
"data.to_csv(f\"{root_dir}/data/full_brenda/reaction_smiles.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:56] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:56] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:56] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:56] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:56] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:56] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:58] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:59] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:59] WARNING: not removing hydrogen atom without neighbors\n",
"[00:03:59] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:00] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:00] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:00] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:00] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:00] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:00] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:01] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:01] WARNING: not removing hydrogen atom without neighbors\n",
" 7%|▋ | 5913/80142 [00:00<00:05, 14162.90it/s][00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
" 10%|█ | 8104/80142 [00:00<00:04, 16777.25it/s][00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:02] WARNING: not removing hydrogen atom without neighbors\n",
" 23%|██▎ | 18377/80142 [00:01<00:05, 11164.82it/s][00:04:03] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:03] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:03] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:03] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:03] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:03] WARNING: not removing hydrogen atom without neighbors\n",
" 45%|████▌ | 36099/80142 [00:02<00:03, 14574.60it/s][00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
" 57%|█████▋ | 46044/80142 [00:03<00:02, 11547.11it/s][00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
" 60%|█████▉ | 47957/80142 [00:03<00:02, 13018.90it/s][00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:05] WARNING: not removing hydrogen atom without neighbors\n",
" 74%|███████▍ | 59636/80142 [00:04<00:01, 13593.61it/s][00:04:06] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:06] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:06] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:06] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:06] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:06] WARNING: not removing hydrogen atom without neighbors\n",
" 96%|█████████▌| 77006/80142 [00:06<00:00, 12030.73it/s][00:04:08] WARNING: not removing hydrogen atom without neighbors\n",
"[00:04:08] WARNING: not removing hydrogen atom without neighbors\n",
"100%|██████████| 80142/80142 [00:06<00:00, 12837.93it/s]\n"
]
},
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 2. Get npz feature\n",
"os.system(f\"python {root_dir}/src/other_softwares/grover_software/scripts/save_features.py --data_path {root_dir}/data/full_brenda/reaction_smiles.csv \\\n",
" --save_path {root_dir}/data/full_brenda/reaction.npz \\\n",
" --features_generator fgtasklabel \\\n",
" --restart\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Building atom vocab from file: /work/yufeng/2022/enzyme_specificity/data/full_brenda/reaction_smiles.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100000it [00:19, 5156.83it/s] \n",
" 0%| | 0/80143 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"atom vocab size 462\n",
"Building bond vocab from file: /work/yufeng/2022/enzyme_specificity/data/full_brenda/reaction_smiles.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100000it [01:26, 1151.32it/s] \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"bond vocab size 573\n"
]
},
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 3. Get build vocab\n",
"os.system(f\"python {root_dir}/src/other_softwares/grover_software/scripts/build_vocab.py --data_path {root_dir}/data/full_brenda/reaction_smiles.csv \\\n",
" --vocab_save_folder {root_dir}/data/full_brenda/grover_vocab \\\n",
" --dataset_name brenda\")\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CUDA_VISIBLE_DEVICES=1 python main.py fingerprint --data_path /work/yufeng/2022/enzyme_specificity/data/full_brenda/reaction_smiles.csv --features_path /work/yufeng/2022/enzyme_specificity/data/full_brenda/reaction.npz --checkpoint_path /work/yufeng/2022/enzyme_specificity/data/pretrain_model/grover_large.pt --fingerprint_source both --output /work/yufeng/2022/enzyme_specificity/data/full_brenda/fingerprint.npz --save_lmdb_path /work/yufeng/2022/enzyme_specificity/data/full_brenda/grover_fingerprint.lmdb --fingerprint_source both\n"
]
}
],
"source": [
"# 4. Get fingerprint\n",
"print(f\"CUDA_VISIBLE_DEVICES=1 python main.py fingerprint --data_path {root_dir}/data/full_brenda/reaction_smiles.csv --features_path {root_dir}/data/full_brenda/reaction.npz --checkpoint_path {root_dir}/data/pretrain_model/grover_large.pt --fingerprint_source both --output {root_dir}/data/full_brenda/fingerprint.npz --save_lmdb_path {root_dir}/data/full_brenda/grover_fingerprint.lmdb --fingerprint_source both\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create morgan embedding"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[00:30:01] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:01] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:01] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:01] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:02] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:11] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:11] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:11] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:11] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:11] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:11] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:24] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:24] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:31] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:32] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:32] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:32] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:33] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:33] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:33] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:33] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:33] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:42] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:42] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:42] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:42] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:42] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:42] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:55] WARNING: not removing hydrogen atom without neighbors\n",
"[00:30:55] WARNING: not removing hydrogen atom without neighbors\n"
]
}
],
"source": [
"from rdkit.Chem import AllChem\n",
"from rdkit import Chem\n",
"\n",
"path = f\"{root_dir}/data/full_brenda/reaction_smiles.csv\"\n",
"df = pd.read_csv(path, sep=',')\n",
"results = []\n",
"for smile in df['substrates']:\n",
" m1 = Chem.MolFromSmiles(smile)\n",
" result = np.array(AllChem.GetMorganFingerprintAsBitVect(m1,2,nBits=1024))\n",
" results.append(result)\n",
"np.save(f\"{root_dir}/data/full_brenda/morgan_fingerprint.npy\", np.array(results))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Select data points for halogenase"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/work/yufeng/miniconda/envs/revae/lib/python3.7/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n",
"100%|██████████| 186867/186867 [00:04<00:00, 38283.43it/s]\n",
"100%|██████████| 162041/162041 [00:04<00:00, 36509.81it/s]\n",
"100%|██████████| 18420/18420 [00:00<00:00, 39344.17it/s]\n",
"100%|██████████| 11796/11796 [00:00<00:00, 39421.54it/s]\n",
"0it [00:00, ?it/s]\n"
]
}
],
"source": [
"from Datasets.utils import generate_negative_sample\n",
"from easydict import EasyDict\n",
"ecnumbers = [\"1.11.1.10\", \"1.11.1.-\", \"1.11.1.18\", \"1.14.19.9\", \"1.14.14.-\",\"1.14.19.56\", \"1.14.19.-\", \"1.14.99.-\", \"1.14.19.49\", \"3.8.1.1\", \"1.14.20.-\", \"2.5.1.94\", \"2.2.1.6\", \"2.5.1.63\", \"3.13.1.8\", \"2.5.1.-\"]\n",
"n_digit = 0\n",
"\n",
"for n_digit in range(0, 5):\n",
" config = {\n",
" \"data\": {\n",
" \"sampling\": {\n",
" \"same_digits\": 0,\n",
" \"num_negative_enzyme\": 2\n",
" }\n",
" }\n",
" }\n",
" config = EasyDict(config)\n",
"\n",
" def get_number_same_digits(ecnumber1, ecnumber2):\n",
" ecnumber_digits1 = ecnumber1.split(\".\")\n",
" ecnumber_digits2 = ecnumber2.split(\".\")\n",
" for index, (digit1, digit2) in enumerate(zip(ecnumber_digits1, ecnumber_digits2)):\n",
" if digit1 == '-' or digit2 == '-':\n",
" continue\n",
" if digit1 != digit2:\n",
" return index\n",
" return 4\n",
"\n",
" df = pd.read_csv(f\"{root_dir}/data/full_brenda/positive_data.csv\", sep=',')\n",
"\n",
" drop_indexs = []\n",
" for index, ecnumber in enumerate(df['ecnumber']):\n",
" flag = False\n",
" for ecnumber in ecnumbers:\n",
" if get_number_same_digits(ecnumber, df['ecnumber'][index]) >= n_digit:\n",
" flag = True\n",
" break\n",
" if not flag or n_digit == 4:\n",
" drop_indexs.append(index)\n",
" df = df.drop(drop_indexs).reset_index(drop=True)\n",
" df = generate_negative_sample(config, df)\n",
" df.to_csv(f\"{root_dir}/data/halogenase/full_brenda_data_{n_digit}.csv\", sep=',', index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.7.13 ('revae')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.13"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "ea8e2fe48c7ffefb1d2d9f58e61432847d99d21375144a203023aa806149d953"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| Unknown |
3D | antecede/EZSpecificity | Datasets/create_dataset.py | .py | 2,671 | 74 | import random
import pandas as pd
from tqdm import tqdm
import time
root_dir = "/projects/bbto/suyufeng/enzyme_specificity"
def check(ecnumber_dict, enzyme1, enzyme2, digits):
ecnumber1s = ecnumber_dict[enzyme1].split('.')
ecnumber2s = ecnumber_dict[enzyme2].split('.')
for index, (ecnumber1, ecnumber2) in enumerate(zip(ecnumber1s, ecnumber2s)):
if index == int(digits):
return True
if ecnumber1 != ecnumber2 and ecnumber1!='-' and ecnumber2!='-':
return False
return True
def main():
n_negative_sample = 1
df = pd.read_csv(f"{root_dir}/data/positive_data.csv", sep=',')
# calculate dict of (reaction -> [enzymes])
reaction_dict = {}
# ecnumber_dict of (enzyme -> [ecnumbers])
ecnumber_dict = {}
for reaction, enzyme, ecnumber in zip(df['reaction'], df['enzyme'], df['ecnumber']):
if reaction not in reaction_dict:
reaction_dict[reaction] = []
reaction_dict[reaction].append((enzyme, ecnumber))
if enzyme in ecnumber_dict:
ecnumber_dict[enzyme].append(ecnumber)
else:
ecnumber_dict[enzyme] = [ecnumber]
for key in reaction_dict:
reaction_dict[key] = {enzyme: True for enzyme, ecnumber in list(set(reaction_dict[reaction]))}
# get all list of enzymes
full_enzymes = list(set(df['enzyme'].values.tolist()))
# create negative data and positive data
data = {
'reaction': [],
'enzyme': [],
'ecnumber': [],
'label': []
}
for reaction, enzyme, ecnumber in tqdm(zip(df['reaction'], df['enzyme'], df['ecnumber']), total=df['enzyme'].shape[0]):
# positive sample
data['reaction'].append(reaction)
data['enzyme'].append(enzyme)
data['ecnumber'].append(ecnumber)
data['label'].append(1)
# negative sample
start = time.time()
current_n_negative_sample = 0
while current_n_negative_sample < n_negative_sample and time.time() - start < 10:
negative_enzyme = random.choice(full_enzymes)
if negative_enzyme not in reaction_dict[reaction]:
data['reaction'].append(reaction)
data['enzyme'].append(negative_enzyme)
data['ecnumber'].append(ecnumber)
data['label'].append(0)
current_n_negative_sample += 1
# save data
data = pd.DataFrame(data).sample(frac=1).reset_index(drop=True)
# data = pd.DataFrame(data).reset_index(drop=True)
data.to_csv(f"{root_dir}/data/brenda/data.csv", index=False)
if __name__ == "__main__":
main()
# generate_negative_data() | Python |
3D | antecede/EZSpecificity | Datasets/utils.py | .py | 13,610 | 383 | import numpy as np
import torch
from tqdm import tqdm
from torch_scatter import scatter
import rdkit
import random
from rdkit import Chem
from rdkit.Chem.rdchem import BondType, HybridizationType
import pandas as pd
import pickle, os
from Datasets.const import restype_3to1, restype_name_to_atom14_names, letter_to_num
def get_paths(data):
if type(data) == str:
return [data]
else:
return data
def check_paths_exist(data):
paths = get_paths(data)
for path in paths:
if not os.path.exists(path):
return False
return True
def read_datasets(data):
result = []
dataset_id = []
for index, path in enumerate(get_paths(data)):
df = pd.read_csv(path, sep=',')
result.append(df)
dataset_id.extend([index] * len(result[-1].index))
df = pd.concat(result, ignore_index=True).reset_index(drop=True)
df['dataset_id'] = dataset_id
df['tag'] = dataset_id
return df
def get_atom_coord(residue):
atoms = restype_name_to_atom14_names[residue.get_resname()]
coords = []
mask = []
for atom in atoms:
if len(atom) == 0 or (not residue.has_id(atom)):
coords.append(np.array([0, 0, 0]))
mask.append(0)
else:
coords.append(residue[atom].get_coord())
mask.append(1)
return np.array(coords), np.array(mask)
def get_res_coord(residues):
coords = []
masks = []
for residue in residues:
coord, mask = get_atom_coord(residue)
coords.append(coord)
masks.append(mask)
return np.array(coords)
def _normalize(tensor, dim=-1):
'''
Normalizes a `torch.Tensor` along dimension `dim` without `nan`s.
'''
return torch.nan_to_num(
torch.div(tensor, torch.norm(tensor, dim=dim, keepdim=True)))
def _rbf(D, D_min=0., D_max=20., D_count=16, device='cpu'):
'''
From https://github.com/jingraham/neurips19-graph-protein-design
Returns an RBF embedding of `torch.Tensor` `D` along a new axis=-1.
That is, if `D` has shape [...dims], then the returned tensor will have
shape [...dims, D_count].
'''
D_mu = torch.linspace(D_min, D_max, D_count, device=device)
D_mu = D_mu.view([1, -1])
D_sigma = (D_max - D_min) / D_count
D_expand = torch.unsqueeze(D, -1)
RBF = torch.exp(-((D_expand - D_mu) / D_sigma) ** 2)
return RBF
def check_residue(residue):
if residue.get_id()[0] != ' ':
return False
if not residue.get_resname() in restype_3to1:
return False
return True
def get_residue(chain):
residues = []
seqs = ""
for residue in chain.get_residues():
if check_residue(residue):
residues.append(residue)
seqs+=restype_3to1[residue.get_resname()]
return residues, seqs
def change_inchi_to_smile(inchi):
mol = rdkit.Chem.inchi.MolFromInchi(inchi)
if mol is None:
return "-"
else:
return Chem.MolToSmiles(mol)
def add_dataset_id(df, id):
df['dataset_id'] = np.array([id] * len(df.index))
return df
def generate_negative_sample(df, config=None, same_digits=None, num_negative_enzyme=None, random_negative_digit=None, has_positive_sample=True):
if config is None:
assert same_digits is not None and num_negative_enzyme is not None
else:
same_digits = config.data.sampling.same_digits
num_negative_enzyme = config.data.sampling.num_negative_enzyme
print("Start generating negative samples!")
# calculate dict of (reaction -> [enzymes])
reaction_dict = {}
# enzyme_dict of (enzyme -> [ecnumbers])
enzyme_dict = {}
# ecnumber_dict of (ecnumber -> [enzymes])
ecnumber_dict = {}
# negative_dict of ([reaction, enzyme] -> true,false)
negative_dict = {}
for reaction, enzyme, ecnumber in zip(df['reaction'], df['enzyme'], df['ecnumber']):
if reaction not in reaction_dict:
reaction_dict[reaction] = []
reaction_dict[reaction].append((enzyme, ecnumber))
if enzyme in enzyme_dict:
enzyme_dict[enzyme].append(ecnumber)
else:
enzyme_dict[enzyme] = [ecnumber]
for i in range(0, 5):
chuncked_ecnumber = ".".join(ecnumber.split(".")[:i])
if chuncked_ecnumber not in ecnumber_dict:
ecnumber_dict[chuncked_ecnumber] = []
ecnumber_dict[chuncked_ecnumber].append(enzyme)
for key in reaction_dict:
reaction_dict[key] = {enzyme: True for enzyme, ecnumber in list(set(reaction_dict[key]))}
# create negative data and positive data
data = {
'reaction': [],
'enzyme': [],
'ecnumber': [],
'fake_ecnumber': [],
'label': []
}
for reaction, enzyme, ecnumber in tqdm(zip(df['reaction'], df['enzyme'], df['ecnumber']), total=df['enzyme'].shape[0]):
# positive sample
if has_positive_sample:
data['reaction'].append(reaction)
data['enzyme'].append(enzyme)
data['ecnumber'].append(ecnumber)
data['fake_ecnumber'].append(ecnumber)
data['label'].append(1)
# negative sample
# determine the difficulty of negative sample
if random_negative_digit is None:
negative_same_digit = same_digits
else:
negative_same_digit = random.randint(0, same_digits+1)
current_n_negative_sample = 0
sample_enzymes = ecnumber_dict[".".join(ecnumber.split(".")[:negative_same_digit])]
if len(sample_enzymes) > num_negative_enzyme * 10:
sample_enzymes = random.sample(sample_enzymes, num_negative_enzyme * 10)
random.shuffle(sample_enzymes)
for negative_enzyme in sample_enzymes:
if current_n_negative_sample < num_negative_enzyme:
if negative_enzyme not in reaction_dict[reaction] and (negative_enzyme, reaction) not in negative_dict:
data['reaction'].append(reaction)
data['enzyme'].append(negative_enzyme)
data['fake_ecnumber'].append(enzyme_dict[negative_enzyme][0])
data['ecnumber'].append(ecnumber)
data['label'].append(0)
negative_dict[(negative_enzyme, reaction)] = True
current_n_negative_sample += 1
data = pd.DataFrame(data)
return data
def download_uniprot_file():
import re
import requests
from requests.adapters import HTTPAdapter, Retry
re_next_link = re.compile(r'<(.+)>; rel="next"')
retries = Retry(total=5, backoff_factor=0.25, status_forcelist=[500, 502, 503, 504])
session = requests.Session()
session.mount("https://", HTTPAdapter(max_retries=retries))
def get_next_link(headers):
if "Link" in headers:
match = re_next_link.match(headers["Link"])
if match:
return match.group(1)
def get_batch(batch_url):
while batch_url:
response = session.get(batch_url)
response.raise_for_status()
total = response.headers["x-total-results"]
yield response, total
batch_url = get_next_link(response.headers)
url = 'https://rest.uniprot.org/uniprotkb/search?compressed=false&fields=accession%2Creviewed%2Cid%2Clength%2Cft_act_site%2Cft_binding&format=tsv&query=%28%28ftlen_act_site%3A%5B%2A%20TO%2010000%5D%29%29&size=500'
progress = 0
with open(f"{root_dir}/data/brenda/raw_data/uniprot/importantsites.tsv", 'w') as f:
for batch, total in get_batch(url):
lines = batch.text.splitlines()
if not progress:
print(lines[0], file=f)
for line in lines[1:]:
print(line, file=f)
progress += len(lines[1:])
print(f'{progress} / {total}', end='\r')
def get_ligand_atom_features(rdmol):
num_atoms = rdmol.GetNumAtoms()
atomic_number = []
aromatic = []
sp, sp2, sp3 = [], [], []
degree = []
index_to_newindex_dict = {}
for index, atom in enumerate(rdmol.GetAtoms()):
atom_idx = atom.GetIdx()
index_to_newindex_dict[atom_idx] = index
atom = rdmol.GetAtomWithIdx(atom_idx)
atomic_number.append(atom.GetAtomicNum())
aromatic.append(1 if atom.GetIsAromatic() else 0)
hybridization = atom.GetHybridization()
sp.append(1 if hybridization == HybridizationType.SP else 0)
sp2.append(1 if hybridization == HybridizationType.SP2 else 0)
sp3.append(1 if hybridization == HybridizationType.SP3 else 0)
degree.append(atom.GetDegree())
node_type = torch.tensor(atomic_number, dtype=torch.long)
row, col = [], []
for bond in rdmol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
start = index_to_newindex_dict[start]
end = index_to_newindex_dict[end]
row += [start, end]
col += [end, start]
row = torch.tensor(row, dtype=torch.long)
col = torch.tensor(col, dtype=torch.long)
hs = (node_type == 1).to(torch.float)
num_hs = scatter(hs[row], col, dim_size=num_atoms).numpy()
feat_mat = np.array([atomic_number, aromatic, degree, num_hs, sp, sp2, sp3], dtype=np.compat.long).transpose()
return feat_mat
def parse_smile(string):
mol = Chem.MolFromSmiles(string)
tmp_mol = Chem.MolFromSmiles(string)
Chem.SanitizeMol(tmp_mol)
for atom in tmp_mol.GetAtoms():
atom.SetAtomMapNum(0)
canonical_mol = Chem.MolFromSmiles(Chem.MolToSmiles(tmp_mol))
mapping_idx = list(map(int, tmp_mol.GetProp("_smilesAtomOutputOrder")[1:-2].split(",")))
for atom in canonical_mol.GetAtoms():
atom.SetAtomMapNum(mol.GetAtomWithIdx(mapping_idx[atom.GetIdx()]).GetAtomMapNum())
mol = canonical_mol
feat_mat = get_ligand_atom_features(mol)
ptable = Chem.GetPeriodicTable()
num_atoms = mol.GetNumAtoms()
num_bonds = mol.GetNumBonds()
element = []
index_to_newindex_dict = {}
newindex_to_index_dict = {}
for index in range(mol.GetNumAtoms()):
atom = mol.GetAtomWithIdx(index)
index_to_newindex_dict[atom.GetAtomMapNum()] = atom.GetIdx()
newindex_to_index_dict[atom.GetIdx()] = atom.GetAtomMapNum()
atomic_number = atom.GetAtomicNum()
element.append(atomic_number)
element = np.array(element, dtype=np.compat.long)
row, col, edge_type = [], [], []
BOND_TYPES = {t: i for i, t in enumerate(BondType.names.values())}
for bond in mol.GetBonds():
start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
row += [start, end]
col += [end, start]
edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
edge_index = np.array([row, col], dtype=np.compat.long)
edge_type = np.array(edge_type, dtype=np.compat.long)
perm = (edge_index[0] * num_atoms + edge_index[1]).argsort()
edge_index = edge_index[:, perm]
edge_type = edge_type[perm]
data = {
'element': element,
'edge_index': edge_index,
'edge_type': edge_type,
'atom_feature': feat_mat
}
return data, index_to_newindex_dict, newindex_to_index_dict, mol
def check_smile_equal(smilea, smileb):
# smilea will contain index and smileb is clean smile
a = Chem.MolFromSmiles(smilea)
for atom in a.GetAtoms():
atom.SetAtomMapNum(0)
a = Chem.CanonSmiles(Chem.MolToSmiles(a))
b = Chem.CanonSmiles(smileb)
return a == b
def load_pickle(file_name):
with open(file_name, 'rb') as f:
return pickle.load(f)
def load_tensor(file_name, dtype):
return [dtype(d) for d in np.load(file_name + '.npy', allow_pickle=True)]
def get_neighbor_list(data, idx, dict):
neighbor_list = []
for i in range(data["edge_index"].shape[1]):
if data["edge_index"][0][i] == idx and data["edge_index"][1][i] in dict:
neighbor_list.append((data["edge_index"][1][i], data["edge_type"][i]))
return neighbor_list
def convert_protein_sequence_to_number(protein_sequence):
protein_number = []
for i in protein_sequence:
protein_number.append(letter_to_num[i])
return np.array(protein_number)
def torchify_dict(data):
output = {}
for k, v in data.items():
if isinstance(v, np.ndarray):
output[k] = torch.from_numpy(v)
else:
output[k] = v
return output
def deredundant(values):
# avoid using set(list) because it will change the order
results = []
value_dict = {}
for value in values:
if value not in value_dict:
value_dict[value] = 1
results.append(value)
return np.array(results)
def preprocess_reaction_feature(data, max_reaction_length):
    """Pad reaction features in place up to `max_reaction_length` atoms.

    Stores a (1, max_reaction_length) boolean mask under
    'reaction_padding_mask' where True marks positions beyond the real
    atom count, and zero-pads the optional 'grover' embedding along its
    first axis.  Returns the (mutated) `data` dict.
    """
    num_atoms = data['element'].shape[0]
    mask = np.zeros((max_reaction_length), dtype=bool)[None, ...]
    mask[0, num_atoms:] = True
    data['reaction_padding_mask'] = mask
    if "grover" in data:
        pad_rows = max_reaction_length - data['grover'].shape[0]
        data['grover'] = np.pad(data['grover'], ((0, pad_rows), (0, 0)),
                                'constant', constant_values=0)
    return data
def preprocess_enzyme_feature(data, max_enzyme_length):
    """Pad the enzyme embedding in place up to `max_enzyme_length` rows.

    Stores a (1, max_enzyme_length) boolean mask under
    'enzyme_padding_mask' (True marks padded positions) and zero-pads
    'embedding' along the sequence axis.  Returns the (mutated) `data`.
    """
    seq_len = data['embedding'].shape[0]
    mask = np.zeros((max_enzyme_length), dtype=bool)[None, ...]
    mask[0, seq_len:] = True
    data['enzyme_padding_mask'] = mask
    data['embedding'] = np.pad(data['embedding'],
                               ((0, max_enzyme_length - seq_len), (0, 0)),
                               'constant', constant_values=0)
    return data
# Module is meant to be imported; nothing runs when executed directly.
if __name__ == "__main__":
    pass
3D | antecede/EZSpecificity | Datasets/create_original_brenda_dataset.ipynb | .ipynb | 13,935 | 425 | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from tqdm import tqdm\n",
"import numpy as np\n",
"import sys\n",
"root_dir = \"/projects/bbhh/suyufeng/enzyme_specificity\"\n",
"sys.path.append(f\"{root_dir}/src\")\n",
"reduced = False"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Get ecnumbers list"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Conver rawcsv file to csv file\n",
"t = open(f\"{root_dir}/data/brenda/raw_data/ecnumbers.csv\", \"r\")\n",
"data = {\n",
" \"ecnumbers\": []\n",
"}\n",
"for index, i in enumerate(t):\n",
" if index % 4 == 0:\n",
" data['ecnumbers'].append(i.strip())\n",
"data = pd.DataFrame(data=data)\n",
"data.to_csv(f\"{root_dir}/data/brenda/ecnumbers.csv\", sep=',', index=False)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Download Reaction\n",
"python Datasets/Downloads/brenda_crawler.py"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Build dataset\n",
" 1. Reaction\n",
" left smile, right smile, substrate\n",
" 2. Sequence\n",
" 3. Organism\n",
" 4. Ecnumber"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Build Ligand name -> smile dict\n",
" 1. Ligand name -> Brenda_ID\n",
" 2. Brenda_id -> CHEBI_ID\n",
" 2. CHEBI_ID -> InChI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Build dictionary name -> breada_id\n",
"df = pd.read_csv(f\"{root_dir}/data/brenda/ecnumbers.csv\", sep=',')\n",
"ecnumbers = df['ecnumbers'].values\n",
"# print(ecnumbers)\n",
"\n",
"ligand_n2i = {}\n",
"for ecnumber in tqdm(ecnumbers):\n",
" # ecnumber = ecnumber.decode('utf-8-sig')\n",
" try:\n",
" if ecnumber[0].isdigit():\n",
" ecnumber = ecnumber.strip()\n",
" else:\n",
" ecnumber = ecnumber.strip()[1:]\n",
" df = pd.read_csv(f\"{root_dir}/data/brenda/raw_data/ligand/{ecnumber}.csv\", sep='\\t', header=None)\n",
" # print(ec_df)\n",
" for name, inchi_key in zip(df[0], df[4]):\n",
" # if name in ligand_n2b:\n",
" # ligand_n2b[name].append(chebiid)\n",
" # else:\n",
" if inchi_key == '-':\n",
" continue\n",
" ligand_n2i[name] = inchi_key\n",
" # break\n",
" except Exception:\n",
" # print(ecnumber)\n",
" # break\n",
" pass"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Build Dataset (Or reduced dataset)\n",
" 1. Only keep the datapoint with verified ecnumber."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from Datasets.utils import change_inchi_to_smile\n",
"from rdkit import RDLogger \n",
"from Bio import SeqIO\n",
"import pandas as pd\n",
"\n",
"def get_active_site(str):\n",
" active_sites = []\n",
" if type(str) == float:\n",
" return active_sites\n",
" for active_string in str.split(\";\"):\n",
" if \"ACT_SITE\" in active_string:\n",
" active_sites.append(int(active_string.strip(\"ACT_SITE \")))\n",
" return active_sites\n",
"\n",
"t = pd.read_csv(f\"{root_dir}/data/brenda/raw_data/uniprot/importantsites.tsv\", sep='\\t')\n",
"\n",
"activesites_dict = {}\n",
"reviewed_dict = {}\n",
"\n",
"for review, entry, active in tqdm(zip(t['Reviewed'], t['Entry'], t['Active site'])):\n",
" activesites = get_active_site(active)\n",
" activesites_dict[entry] = \";\".join([str(i) for i in activesites])\n",
" reviewed_dict[entry] = review\n",
"\n",
"RDLogger.DisableLog('rdApp.*') \n",
"\n",
"def name_to_smile(name):\n",
" names = name.split(\" \")\n",
" if len(names) == 2 and names[0].isdigit():\n",
" times, name = names\n",
" times = int(times)\n",
" try:\n",
" smile = change_inchi_to_smile(ligand_n2i[name])\n",
" return smile\n",
" except Exception:\n",
" pass\n",
" assert False\n",
"\n",
"has_structure = {}\n",
"fin = open(f\"{root_dir}/data/brenda/uniprot_in_alphafill.txt\", \"r\")\n",
"for line in fin:\n",
" has_structure[line.strip()] = True\n",
"\n",
"bad_cnt = 0\n",
"cnt = 0\n",
"dfs = None\n",
"ecnumbers = pd.read_csv(f\"{root_dir}/data/brenda/ecnumbers.csv\", sep=',')['ecnumbers'].values\n",
"bar = tqdm(ecnumbers)\n",
"\n",
"for ecnumber in bar:\n",
" try:\n",
" ligand_df = pd.read_csv(f\"{root_dir}/data/brenda/raw_data/reaction/{ecnumber}.csv\", sep=',')\n",
" enzyme_df = pd.read_csv(f\"{root_dir}/data/brenda/raw_data/enzyme/{ecnumber}.csv\", sep=',')\n",
" if reduced:\n",
" indexs = []\n",
" for index, (uniprot, sequence) in enumerate(enzyme_df['uniprot'], enzyme_df['sequence']):\n",
" if uniprot not in reviewed_dict or reviewed_dict[uniprot] != 'reviewed':\n",
" indexs.append(index)\n",
" # print(enzyme_df)\n",
" enzyme_df = enzyme_df.drop(indexs).reset_index(drop=True)\n",
" except Exception:\n",
" continue\n",
" \n",
" # try to random sample one enzyme for each organism (enzyme with structure will have a higher probability to be sampled)\n",
" organsim_dict = {}\n",
" for index, (organism, uniprot) in enumerate(zip(enzyme_df['organism'], enzyme_df['uniprot'])):\n",
" if organism in organsim_dict:\n",
" organsim_dict[organism].append((index, uniprot))\n",
" else:\n",
" organsim_dict[organism] = [(index, uniprot)]\n",
"\n",
" drop_idx = []\n",
" for organism, values in organsim_dict.items():\n",
" \n",
" sample_idxs = []\n",
" for index, uniprot in values:\n",
" if uniprot in has_structure and (uniprot in activesites_dict and activesites_dict[uniprot] != \"\"):\n",
" sample_idxs.append((index, uniprot))\n",
" \n",
" if len(sample_idxs) == 0:\n",
" sample_idxs = values\n",
"\n",
" represent_enzyme_index = sample_idxs[np.random.choice(len(sample_idxs))][0]\n",
"\n",
" for index, uniprot in values:\n",
" if index != represent_enzyme_index:\n",
" drop_idx.append(index)\n",
"\n",
" print(enzyme_df.index.shape[0])\n",
" enzyme_df = enzyme_df.drop(drop_idx).reset_index(drop=True)\n",
" print(enzyme_df.index.shape[0])\n",
" print(\"--\")\n",
" df = pd.merge(ligand_df, enzyme_df, how='inner', left_on=['organism'], right_on=['organism'])\n",
" data = {\n",
" \"left\": [],\n",
" \"right\": [],\n",
" \"substrate\": [],\n",
" \"ecnumber\": [],\n",
" \"organism\": [],\n",
" \"uniprot\": [], \n",
" \"sequence\": [],\n",
" \"activesites\": []\n",
" }\n",
" left_smiles = []\n",
" right_smiles = []\n",
" for reaction, substrate, organism, sequence, uniprot in zip(df['reaction'].values, df['substrate'].values, df['organism'].values, \n",
" df['sequence'].values, df['uniprot'].values):\n",
" try:\n",
" cnt += 1\n",
" smiles = reaction.split(\" = \")\n",
" assert len(smiles) == 2\n",
" left, right = smiles\n",
" \n",
" def convert(smile):\n",
" names = smile.strip().split(\" + \")\n",
" ans = []\n",
" for name in names:\n",
" if name == '?':\n",
" continue\n",
" ans.append(name_to_smile(name))\n",
" return ans\n",
" \n",
" left = convert(left)\n",
" right = convert(right)\n",
" substrate = name_to_smile(substrate)\n",
" data[\"left\"].append(\".\".join(left))\n",
" data[\"right\"].append(\".\".join(right))\n",
" data['substrate'].append(substrate)\n",
" data['ecnumber'].append(ecnumber)\n",
" data['organism'].append(organism)\n",
" data['sequence'].append(sequence)\n",
" data['uniprot'].append(uniprot)\n",
" if uniprot not in activesites_dict:\n",
" data['activesites'].append(\"\")\n",
" else:\n",
" data['activesites'].append(activesites_dict[uniprot])\n",
" except AssertionError:\n",
" bad_cnt += 1\n",
" pass\n",
" df = pd.DataFrame(data=data)\n",
" if dfs is None:\n",
" dfs = df\n",
" else:\n",
" dfs = pd.concat([dfs, df])\n",
" # print(dfs)\n",
" # break\n",
" bar.set_postfix(n_skipped=bad_cnt, n_all=cnt, skip_ratio=bad_cnt/(cnt+1))\n",
"if reduced:\n",
" dfs.to_csv(f\"{root_dir}/data/brenda/reduced_data.csv\", sep=',')\n",
"else:\n",
" dfs.to_csv(f\"{root_dir}/data/brenda/data.csv\", sep=',')\n",
"# print()\n",
"print(f\"{bad_cnt}/{cnt}\")\n",
" # except Exception:\n",
" # pass\n",
"# print(n_has_structure)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Add cofactor informoation into the files"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 8281/8281 [00:41<00:00, 201.60it/s]\n",
"254208it [00:17, 14811.09it/s] \n"
]
}
],
"source": [
"import pandas as pd\n",
"import glob\n",
"import os\n",
"from tqdm import tqdm\n",
"from rdkit import Chem\n",
"\n",
"root_dir = \"/projects/bbhh/suyufeng/enzyme_specificity\"\n",
"\n",
"def change_inchi_to_smile(inchi):\n",
" try:\n",
" mol = Chem.inchi.MolFromInchi(inchi)\n",
" if mol is None:\n",
" return \"-\"\n",
" else:\n",
" return Chem.MolToSmiles(mol)\n",
" except:\n",
" return \"-\"\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/brenda/data.csv\", sep=',')\n",
"\n",
"cofactors = []\n",
"\n",
"cofactor_dict = {}\n",
"# Build cofactor dict\n",
"\n",
"for name in tqdm(glob.glob(f\"{root_dir}/data/brenda/raw_data/ligand/*.csv\")):\n",
" # print(name)\n",
" try:\n",
" t = pd.read_csv(name, sep='\\t', header=None)\n",
" ecnumber = os.path.basename(name).strip('.csv')\n",
"\n",
" for name, type, inchi in zip(t[0], t[2], t[4]):\n",
" if 'Cofactor' in type:\n",
" smile = change_inchi_to_smile(inchi)\n",
" if smile != '-':\n",
" cofactor_dict[(ecnumber, smile)] = name\n",
" except:\n",
" pass\n",
"\n",
"for ecnumber, left, substrate in tqdm(zip(df['ecnumber'], df['left'], df['substrate'])):\n",
" cofactor = []\n",
" for ligand in left.split(\".\"):\n",
" try:\n",
" if ligand != '-' and ligand != substrate and (ecnumber, ligand) in cofactor_dict:\n",
" # print(cofactor_dict[(ecnumber, ligand)])\n",
" cofactor.append(cofactor_dict[(ecnumber, Chem.MolToSmiles(Chem.MolFromSmiles(ligand)))])\n",
" except:\n",
" pass\n",
" cofactors.append(\";\".join(cofactor))\n",
"\n",
"df['cofactor'] = cofactors\n",
"\n",
"df.to_csv(f\"{root_dir}/data/brenda/data_cofactor.csv\", index=False)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Get esm embedding for enzyme\n",
" 1. Run get_embedding.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Delete unavailable datapoints\n",
" 1. Delete substrate is '-'\n",
" 2. Ems_embedding is not available"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.7.13 ('revae')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.13"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "ea8e2fe48c7ffefb1d2d9f58e61432847d99d21375144a203023aa806149d953"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| Unknown |
3D | antecede/EZSpecificity | Datasets/preprocess.ipynb | .ipynb | 33,006 | 838 | {
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from tqdm import tqdm\n",
"import numpy as np\n",
"import sys\n",
"import re\n",
"import os\n",
"\n",
"root_dir = \"/projects/bbhh/suyufeng/enzyme_specificity\"\n",
"\n",
"sys.path.append(f\"{root_dir}/src\")\n",
"sys.path.append(f\"{root_dir}/src/other_softwares/grover_software\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Download enzyme active info from brenda\n",
"See Dataset.utils (download_uniprot_file)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create the reaction features\n",
"See Dataset.utils (get_reaction_feature)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create the enzyme features\n",
"See Dataset.utils (get_enzyme_feature)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create positive samples dataset"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv(f\"{root_dir}/data/brenda/data_cofactor.csv\", sep=',').dropna(subset=['left', 'right', 'uniprot', 'ecnumber'])\n",
"enzyme_df = pd.read_csv(f\"{root_dir}/data/brenda/enzymes.csv\", sep=',')\n",
"reaction_df = pd.read_csv(f\"{root_dir}/data/brenda/reaction.csv\", sep=',')\n",
"\n",
"uniprot_dict = {uniprot: index for index, uniprot in enumerate(enzyme_df['uniprots'].values.tolist())}\n",
"reaction_dict = {reaction: index for index, reaction in enumerate(reaction_df['reactions'].values.tolist())}\n",
"\n",
"data = {\n",
" 'reaction': [],\n",
" 'enzyme': [],\n",
" 'ecnumber': []\n",
"}\n",
"\n",
"for left, right, uniprot, ecnumber in zip(df['left'], df['right'], df['uniprot'], df['ecnumber']):\n",
" if uniprot in uniprot_dict and left + '>>' + right in reaction_dict:\n",
" data['reaction'].append(reaction_dict[left + '>>' + right])\n",
" data['enzyme'].append(uniprot_dict[uniprot])\n",
" data['ecnumber'].append(ecnumber)\n",
"\n",
"data = pd.DataFrame(data).sample(frac=1).reset_index()\n",
"data.to_csv(f\"{root_dir}/data/positive_data.csv\", index=False)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Calculate max_length of reaction"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"135\n"
]
}
],
"source": [
"import lmdb\n",
"import pickle\n",
"\n",
"reaction_save_lmdb_path = f\"{root_dir}/data/brenda/reaction_features.lmdb\"\n",
"db = lmdb.open(\n",
" reaction_save_lmdb_path,\n",
" map_size=10*(1024*1024*1024), # 10GB\n",
" create=False,\n",
" subdir=False,\n",
" readonly=True,\n",
" lock=False,\n",
" readahead=False,\n",
" meminit=False,\n",
")\n",
"with db.begin() as txn:\n",
" keys = list(txn.cursor().iternext(values=False))\n",
"\n",
"max_n_atoms = 0\n",
"\n",
"for key in keys:\n",
" with db.begin(write=False, buffers=True) as txn:\n",
" # key = str(key).encode()\n",
" value = txn.get(key)\n",
" if value is None:\n",
" raise KeyError\n",
" data = pickle.loads(value)\n",
" max_n_atoms = max(max_n_atoms, data['element'].shape[0])\n",
" # break\n",
"print(max_n_atoms)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create grover embedding"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# 1. Tou Tou is going to create smile only csv\n",
"import pandas as pd\n",
"import os\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/brenda/reaction.csv\", sep=',')\n",
"results = [smile for smile in df['substrates']]\n",
"data = {\n",
" \"substrates\": results\n",
"}\n",
"data = pd.DataFrame(data)\n",
"data.to_csv(f\"{root_dir}/data/brenda/reaction_smiles.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"python /projects/bbhh/suyufeng/enzyme_specificity/src/other_softwares/grover_software/scripts/save_features.py --data_path /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/reaction_smiles.csv --save_path /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/reaction.npz --features_generator fgtasklabel --restart\n"
]
}
],
"source": [
"# 2. Get npz feature\n",
"print(f\"python {root_dir}/src/other_softwares/grover_software/scripts/save_features.py --data_path {root_dir}/data/brenda/reaction_smiles.csv \\\n",
" --save_path {root_dir}/data/brenda/reaction.npz \\\n",
" --features_generator fgtasklabel \\\n",
" --restart\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"python /projects/bbhh/suyufeng/enzyme_specificity/src/other_softwares/grover_software/scripts/build_vocab.py --data_path /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/reaction_smiles.csv --vocab_save_folder /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/grover_vocab --dataset_name brenda\n"
]
}
],
"source": [
"# 3. Get build vocab\n",
"print(f\"python {root_dir}/src/other_softwares/grover_software/scripts/build_vocab.py --data_path {root_dir}/data/brenda/reaction_smiles.csv \\\n",
" --vocab_save_folder {root_dir}/data/brenda/grover_vocab \\\n",
" --dataset_name brenda\")\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"python main.py fingerprint --data_path /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/reaction_smiles.csv --features_path /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/reaction.npz --checkpoint_path /projects/bbhh/suyufeng/enzyme_specificity/data/pretrain_model/grover_large.pt --fingerprint_source both --output /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/fingerprint.npz --save_lmdb_path /projects/bbhh/suyufeng/enzyme_specificity/data/brenda/grover_fingerprint.lmdb --fingerprint_source both\n"
]
}
],
"source": [
"# 4. Get fingerprint\n",
"print(f\"python main.py fingerprint --data_path {root_dir}/data/brenda/reaction_smiles.csv --features_path {root_dir}/data/brenda/reaction.npz --checkpoint_path {root_dir}/data/pretrain_model/grover_large.pt --fingerprint_source both --output {root_dir}/data/brenda/fingerprint.npz --save_lmdb_path {root_dir}/data/brenda/grover_fingerprint.lmdb --fingerprint_source both\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create morgan embedding"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[01:34:51] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:51] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:51] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:52] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:53] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:53] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:53] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:53] WARNING: not removing hydrogen atom without neighbors\n",
"[01:34:53] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:01] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:01] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:01] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:01] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:01] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:01] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:12] WARNING: not removing hydrogen atom without neighbors\n",
"[01:35:12] WARNING: not removing hydrogen atom without neighbors\n"
]
}
],
"source": [
"from rdkit.Chem import AllChem\n",
"from rdkit import Chem\n",
"\n",
"path = f\"{root_dir}/data/brenda/reaction_smiles.csv\"\n",
"df = pd.read_csv(path, sep=',')\n",
"results = []\n",
"for smile in df['substrates']:\n",
" m1 = Chem.MolFromSmiles(smile)\n",
" result = np.array(AllChem.GetMorganFingerprintAsBitVect(m1,2,nBits=1024))\n",
" results.append(result)\n",
"np.save(f\"{root_dir}/data/brenda/morgan_fingerprint.npy\", np.array(results))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generate negative samples"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/u/suyufeng/.conda/envs/revae/lib/python3.7/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start generating negative samples!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 187195/187195 [00:03<00:00, 53222.42it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start generating negative samples!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 187195/187195 [00:03<00:00, 54281.39it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start generating negative samples!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 187195/187195 [00:03<00:00, 49189.43it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start generating negative samples!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 187195/187195 [00:02<00:00, 73363.72it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start generating negative samples!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 187195/187195 [00:03<00:00, 61505.72it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Start generating negative samples!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 187195/187195 [00:02<00:00, 78105.59it/s]\n"
]
}
],
"source": [
"from Datasets.utils import generate_negative_sample\n",
"\n",
"pdf = pd.read_csv(f\"{root_dir}/data/positive_data.csv\", sep=',', index_col=False)\n",
"pdf['difficulty'] = [-1] * pdf['ecnumber'].values.shape[0]\n",
"pdf['fake_ecnumber'] = pdf['ecnumber']\n",
"pdf['label'] = [1] * pdf['ecnumber'].values.shape[0]\n",
"dfs = [pdf]\n",
"for same_digits in range(0, 6):\n",
" df = generate_negative_sample(pdf, same_digits=same_digits, num_negative_enzyme=1, has_positive_sample=False)\n",
" df['difficulty'] = [same_digits] * df.index.shape[0]\n",
" dfs.append(df)\n",
"df = pd.concat(dfs)\n",
"df.to_csv(f\"{root_dir}/data/data.csv\", index=False)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# MISC: Add more information for docking simulation"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"t = pd.read_csv(f\"{root_dir}/data/data.csv\", sep=',')\n",
"enzyme_df = pd.read_csv(f\"{root_dir}/data/brenda/enzymes.csv\", sep=',')\n",
"reaction_df = pd.read_csv(f\"{root_dir}/data/brenda/reaction.csv\", sep=',')\n",
"\n",
"enzmye_dict = {index: uniprot for index, uniprot in enumerate(enzyme_df['uniprots'])}\n",
"reaction_dict = {index: (reaction, substrate) for index, (reaction, substrate) in enumerate(zip(reaction_df['reactions'], reaction_df['substrates']))}\n",
"\n",
"uniprots = []\n",
"reactions = []\n",
"substrates = []\n",
"activesites = []\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/brenda/data_cofactor.csv\", sep=',')\n",
"uniprot_active_site_dict = {uniprot: active_site for uniprot, active_site in zip(df['uniprot'], df['activesites'])}\n",
"\n",
"for enzyme, reaction in zip(t['enzyme'], t['reaction']):\n",
" uniprot = enzmye_dict[enzyme]\n",
" reaction, substrate = reaction_dict[reaction]\n",
" uniprots.append(uniprot)\n",
" reactions.append(reaction)\n",
" substrates.append(substrate)\n",
" activesites.append(uniprot_active_site_dict[uniprot])\n",
"\n",
"t['uniprot'] = uniprots\n",
"t['reaction'] = reactions\n",
"t['substrate'] = substrates\n",
"t['active_site'] = activesites\n",
"\n",
"t.to_csv(f\"{root_dir}/data/to_docking.csv\", index=False)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# MISC: create cofactor names"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 8281/8281 [00:26<00:00, 311.25it/s]\n"
]
}
],
"source": [
"import pandas as pd\n",
"from tqdm import tqdm\n",
"import glob\n",
"\n",
"root_dir = \"/projects/bbhh/suyufeng/enzyme_specificity\"\n",
"\n",
"data = {\n",
" \"name\": [],\n",
" \"inchi\": []\n",
"}\n",
"name_dict = {}\n",
"for name in tqdm(glob.glob(f\"{root_dir}/data/brenda/raw_data/ligand/*.csv\")):\n",
" # if '7.2.4.5.csv' not in name:\n",
" # continue\n",
" try:\n",
" t = pd.read_csv(name, sep='\\t', header=None)\n",
" for name, type, inchi in zip(t[0], t[2], t[4]):\n",
" if 'Cofactor' in type:\n",
" if name not in name_dict:\n",
" name_dict[name] = inchi\n",
" data['name'].append(name)\n",
" data['inchi'].append(inchi)\n",
" except:\n",
" pass\n",
"# print(data)\n",
"data = pd.DataFrame(data)\n",
"data.to_csv(\"to_ocean.csv\", index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# MISC: post-process docking data and training data"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"258061\n"
]
}
],
"source": [
"import pandas as pd\n",
"\n",
"root_dir = \"/projects/bbto/suyufeng/enzyme_specificity\"\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/brenda/data.csv\", sep=',')\n",
"enzyme_df = pd.read_csv(f\"{root_dir}/data/brenda/enzymes.csv\", sep=',')\n",
"reaction_df = pd.read_csv(f\"{root_dir}/data/brenda/reaction.csv\", sep=',')\n",
"\n",
"docking_index_df = pd.read_csv(f\"{root_dir}/data/brenda/docking_subset_data.csv\", sep=',')\n",
"\n",
"docking_index_dict = {(substrate, uniprot):index for index, (substrate, uniprot) in enumerate(zip(docking_index_df['substrate'], docking_index_df['uniprot']))}\n",
"\n",
"enzyme_dict = {uniprot: index for index, uniprot in enumerate(enzyme_df['uniprots'])}\n",
"reaction_dict = {reaction: index for index, reaction in enumerate(reaction_df['reactions'])}\n",
"\n",
"substrate_enzyme_dict = {}\n",
"\n",
"# index,reaction,enzyme,ecnumber,difficulty,fake_ecnumber,label,uniprot,substrate,active_site\n",
"\n",
"data = {\n",
" \"enzyme\": [],\n",
" \"reaction\": [],\n",
" \"label\": [],\n",
" \"ecnumber\": [],\n",
" \"difficulty\": [],\n",
" \"fake_ecnumber\": [],\n",
" \"structure_index\": [],\n",
" \"substrate\": []\n",
"}\n",
"\n",
"cnt = 0\n",
"for enzyme, reaction, label, ecnumber, difficulty, fake_ecnumber, substrate, uniprot in zip(df['enzyme'], df['reaction'], df['label'], df['ecnumber'], df['difficulty'], df['fake_ecnumber'], df['substrate'], df['uniprot']):\n",
" if (uniprot, substrate) in substrate_enzyme_dict:\n",
" continue\n",
" \n",
" substrate_enzyme_dict[(uniprot, substrate)] = 1\n",
" if (substrate, uniprot) in docking_index_dict:\n",
" data['structure_index'].append(docking_index_dict[(substrate, uniprot)])\n",
" cnt += 1\n",
" else:\n",
" data['structure_index'].append(-1)\n",
" \n",
" data['enzyme'].append(enzyme)\n",
" data['reaction'].append(reaction_dict[reaction])\n",
" data['label'].append(label)\n",
" data['ecnumber'].append(ecnumber)\n",
" data['difficulty'].append(difficulty)\n",
" data['fake_ecnumber'].append(fake_ecnumber)\n",
" data['substrate'].append(substrate)\n",
" \n",
"print(cnt)\n",
"data = pd.DataFrame(data)\n",
"data.to_csv(f\"{root_dir}/data/brenda/final_data/data.csv\", index=False)\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import os\n",
"from tqdm import tqdm\n",
"\n",
"root_dir = \"/projects/bbto/suyufeng/enzyme_specificity\"\n",
"df = pd.read_csv(f\"{root_dir}/data/brenda/final_data/data.csv\", sep=',')\n",
"# df = df.loc[df[\"structure_index\"] != -1, :].reset_index(drop=True)\n",
"\n",
"def save_data_csv(datas, file_name, substrate_constraint=None, enzyme_constraint=None):\n",
" data_df = {\n",
" \"enzyme\": [],\n",
" \"reaction\": [],\n",
" \"label\": [],\n",
" \"ecnumber\": [],\n",
" \"difficulty\": [],\n",
" \"fake_ecnumber\": [],\n",
" \"structure_index\": []\n",
" }\n",
" if substrate_constraint is not None:\n",
" substrate_dict = {substrate: 1 for substrate in substrate_constraint}\n",
" else:\n",
" substrate_dict = None\n",
" if enzyme_constraint is not None:\n",
" enzyme_dict = {enzyme: 1 for enzyme in enzyme_constraint}\n",
" else:\n",
" enzyme_dict = None\n",
"\n",
" for (enzyme, reaction, label, ecnumber, difficulty, fake_ecnumber, structure_index, substrate) in tqdm(datas):\n",
" if substrate_dict is not None and substrate not in substrate_dict:\n",
" continue\n",
" if enzyme_dict is not None and enzyme not in enzyme_dict:\n",
" continue\n",
" data_df['enzyme'].append(enzyme)\n",
" data_df['reaction'].append(reaction)\n",
" data_df['label'].append(label)\n",
" data_df['ecnumber'].append(ecnumber)\n",
" data_df['difficulty'].append(difficulty)\n",
" data_df['fake_ecnumber'].append(fake_ecnumber)\n",
" data_df['structure_index'].append(structure_index)\n",
"\n",
" data_df = pd.DataFrame(data_df)\n",
" data_df.to_csv(f\"{root_dir}/data/brenda/final_data/{file_name}.csv\", sep=',', index=False)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 439887/439887 [00:00<00:00, 986228.13it/s]\n",
"100%|██████████| 43988/43988 [00:00<00:00, 830168.08it/s]\n",
"100%|██████████| 102640/102640 [00:00<00:00, 900506.54it/s]\n",
"100%|██████████| 439886/439886 [00:00<00:00, 1018774.90it/s]\n",
"100%|██████████| 43988/43988 [00:00<00:00, 1197742.42it/s]\n",
"100%|██████████| 102641/102641 [00:00<00:00, 1106762.98it/s]\n",
"100%|██████████| 439886/439886 [00:00<00:00, 1296692.38it/s]\n",
"100%|██████████| 43988/43988 [00:00<00:00, 781040.90it/s]\n",
"100%|██████████| 102641/102641 [00:00<00:00, 797363.20it/s]\n",
"100%|██████████| 439886/439886 [00:00<00:00, 844123.74it/s]\n",
"100%|██████████| 43988/43988 [00:00<00:00, 1199533.48it/s]\n",
"100%|██████████| 102641/102641 [00:00<00:00, 1045601.40it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1063986.17it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1692305.93it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1733292.52it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1374069.98it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1883133.64it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1617217.43it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 953627.83it/s] \n",
"100%|██████████| 586515/586515 [00:00<00:00, 1680172.04it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1758742.85it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1046285.91it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2007499.67it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1721236.52it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1309771.61it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1804690.40it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1524289.72it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1195837.07it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2080093.19it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2160284.85it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1255563.71it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2006834.77it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1814763.19it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1322313.19it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1986556.31it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1846399.15it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1333941.13it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2315689.89it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1799873.58it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1329479.43it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1436043.08it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1626469.57it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1310712.32it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2085367.22it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1945476.70it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1411334.41it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 2304889.33it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1907212.77it/s]\n",
"100%|██████████| 586515/586515 [00:00<00:00, 1292973.08it/s]\n"
]
}
],
"source": [
"datas = [\n",
" (enzyme, reaction, label, ecnumber, difficulty, fake_ecnumber, structure_index, substrate)\n",
" for enzyme, reaction, label, ecnumber, difficulty, fake_ecnumber, structure_index, substrate in zip(df['enzyme'], df['reaction'], df['label'], df['ecnumber'], df['difficulty'], df['fake_ecnumber'], df['structure_index'], df['substrate'])\n",
"]\n",
"substrates = list(set([substrate for substrate in df['substrate']]))\n",
"enzymes = list(set([enzyme for enzyme in df['enzyme']]))\n",
"# random split\n",
"import random\n",
"random.shuffle(datas)\n",
"random.shuffle(substrates)\n",
"random.shuffle(enzymes)\n",
"\n",
"os.makedirs(f\"{root_dir}/data/brenda/final_data/random_split\", exist_ok=True)\n",
"for i in range(4):\n",
" training_datas = datas[:int(len(datas) * 0.25 * i)] + datas[int(len(datas) * 0.25 * (i + 1)):]\n",
" \n",
" testing_datas = datas[int(len(datas) * 0.25 * i):int(len(datas) * 0.25 * (i + 1))]\n",
" val_datas = testing_datas[:int(len(testing_datas) * 0.3)]\n",
" testing_datas = testing_datas[int(len(testing_datas) * 0.3):]\n",
"\n",
" save_data_csv(training_datas, f\"random_split/training_datas_{i}\")\n",
" save_data_csv(val_datas, f\"random_split/val_datas_{i}\")\n",
" save_data_csv(testing_datas, f\"random_split/testing_datas_{i}\")\n",
" \n",
"os.makedirs(f\"{root_dir}/data/brenda/final_data/reaction_split\", exist_ok=True)\n",
"os.makedirs(f\"{root_dir}/data/brenda/final_data/enzyme_split\", exist_ok=True)\n",
"os.makedirs(f\"{root_dir}/data/brenda/final_data/all_split\", exist_ok=True)\n",
"for i in range(4):\n",
" # enzyme\n",
" training_enzymes = enzymes[:int(len(enzymes) * 0.25 * i)] + enzymes[int(len(enzymes) * 0.25 * (i + 1)):]\n",
"\n",
" testing_enzymes = enzymes[int(len(enzymes) * 0.25 * i):int(len(enzymes) * 0.25 * (i + 1))]\n",
" val_enzymes = testing_enzymes[:int(len(testing_enzymes) * 0.3)]\n",
" testing_enzymes = testing_enzymes[int(len(testing_enzymes) * 0.3):]\n",
" if len(val_enzymes) == 0:\n",
" val_enzymes = testing_enzymes[:2]\n",
" testing_enzymes = testing_enzymes[2:]\n",
"\n",
" # substrate\n",
" training_substrates = substrates[:int(len(substrates) * 0.25 * i)] + substrates[int(len(substrates) * 0.25 * (i + 1)):]\n",
" \n",
" testing_substrates = substrates[int(len(substrates) * 0.25 * i):int(len(substrates) * 0.25 * (i + 1))]\n",
" val_substrates = testing_substrates[:int(len(testing_substrates) * 0.3)]\n",
" testing_substrates = testing_substrates[int(len(testing_substrates) * 0.3):]\n",
" if len(val_substrates) == 0:\n",
" val_substrates = testing_substrates[:2]\n",
" testing_substrates = testing_substrates[2:]\n",
"\n",
" save_data_csv(datas, f\"enzyme_split/training_datas_{i}\", enzyme_constraint=training_enzymes)\n",
" save_data_csv(datas, f\"enzyme_split/val_datas_{i}\", enzyme_constraint=val_enzymes)\n",
" save_data_csv(datas, f\"enzyme_split/testing_datas_{i}\", enzyme_constraint=testing_enzymes)\n",
"\n",
" save_data_csv(datas, f\"reaction_split/training_datas_{i}\", substrate_constraint=training_substrates)\n",
" save_data_csv(datas, f\"reaction_split/val_datas_{i}\",substrate_constraint=val_substrates)\n",
" save_data_csv(datas, f\"reaction_split/testing_datas_{i}\", substrate_constraint=testing_substrates)\n",
"\n",
" save_data_csv(datas, f\"all_split/training_datas_{i}\", substrate_constraint=training_substrates, enzyme_constraint=training_enzymes)\n",
" save_data_csv(datas, f\"all_split/val_datas_{i}\",substrate_constraint=val_substrates, enzyme_constraint=val_enzymes)\n",
" save_data_csv(datas, f\"all_split/testing_datas_{i}\", substrate_constraint=testing_substrates,enzyme_constraint=testing_enzymes)\n",
"\n",
"save_data_csv(datas, \"big_datas\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generate spreadsheet for baselines"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"def amend_meta(enzyme_path, reaction_path, data_path, tag):\n",
" enzyme_df = pd.read_csv(enzyme_path, sep=',')\n",
" reaction_df = pd.read_csv(reaction_path, sep=',')\n",
" data_df = pd.read_csv(data_path, sep=',')\n",
"\n",
" enzyme_sequences = []\n",
" substrate_smiles = []\n",
"\n",
" enzyme_dict = {index: sequence for index, sequence in enumerate(enzyme_df['sequences'])}\n",
" reaction_dict = {index: substrate for index, substrate in enumerate(reaction_df['substrates'])}\n",
"\n",
" for enzyme, reaction in zip(data_df['enzyme'], data_df['reaction']):\n",
" enzyme_sequences.append(enzyme_dict[enzyme])\n",
" substrate_smiles.append(reaction_dict[reaction])\n",
"\n",
" data_df['enzyme_sequence'] = enzyme_sequences\n",
" data_df['substrate_smile'] = substrate_smiles\n",
"\n",
" data_df.to_csv(f\"/projects/bbto/suyufeng/enzyme_specificity/saved_data/{tag}.csv\", index=False)\n",
" # for index, df in enumerate(np.array_split(data_df, 7)):\n",
" # df.to_csv(f\"/projects/bbto/suyufeng/enzyme_specificity/saved_data/{tag}_{index}.csv\", index=False)\n",
"\n",
"# for tag in [\"Duf\", \"Gt_acceptor\", \"halogenase\", \"Nitrilase\", \"Phosphatase\", \"Thiolase\", \"Esterase\", \"experiment\"]:\n",
"for tag in [\"2023_brenda\"]:\n",
" root_dir = f\"/projects/bbto/suyufeng/enzyme_specificity/data/small_family/{tag}\"\n",
" enzyme_path = f\"{root_dir}/enzymes.csv\"\n",
" reaction_path = f\"{root_dir}/reactions.csv\"\n",
" data_path = f\"{root_dir}/big_datas.csv\"\n",
" amend_meta(enzyme_path, reaction_path, data_path, tag)\n",
"\n",
"# enzyme_path = \"/projects/bbto/suyufeng/enzyme_specificity/data/brenda/enzymes.csv\"\n",
"# reaction_path = \"/projects/bbto/suyufeng/enzyme_specificity/data/brenda/reaction.csv\"\n",
"# data_path = \"/projects/bbto/suyufeng/enzyme_specificity/data/brenda/final_data/big_datas.csv\"\n",
"# amend_meta(enzyme_path, reaction_path, data_path, \"brenda\")\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.7.13 ('revae')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "ea8e2fe48c7ffefb1d2d9f58e61432847d99d21375144a203023aa806149d953"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| Unknown |
3D | antecede/EZSpecificity | Datasets/const.py | .py | 4,856 | 161 | import numpy as np
# Fix: the original `global restype_3to1, ...` statement was a no-op --
# `global` at module level has no effect in Python.  The exported names it
# listed (restype_3to1, restype_1to3, restype_name_to_atom14_names, restypes,
# res_angle_alt, n_res_chi, atom_type_masks, weights, letter_to_num,
# num_to_letter) are all plain module attributes below.

# One-letter amino-acid code -> integer token.  Tokens 0-19 cover the 20
# standard residues; 'X', 'Z', 'U', 'B' (20-23) are ambiguous/rare codes.
letter_to_num = {'C': 4, 'D': 3, 'S': 15, 'Q': 5, 'K': 11, 'I': 9,
                 'P': 14, 'T': 16, 'F': 13, 'A': 0, 'G': 7, 'H': 8,
                 'E': 6, 'L': 10, 'R': 1, 'W': 17, 'V': 19,
                 'N': 2, 'Y': 18, 'M': 12, 'X': 20, 'Z': 21, 'U': 22, 'B': 23}
# Inverse mapping: integer token -> one-letter code.
num_to_letter = {v: k for k, v in letter_to_num.items()}
# The 20 standard amino acids as one-letter codes; a residue's index in this
# list is its numeric residue-type id used throughout this module.
restypes = list('ARNDCQEGHILKMFPSTWYV')
# Per-residue weight table: C, H, M and W are weighted 5, all other standard
# residues 1.  Key order matches the canonical restype order.
weights = {aa: (5 if aa in 'CHMW' else 1) for aa in 'ARNDCQEGHILKMFPSTWYV'}
# One-letter -> three-letter residue-name mapping, in canonical restype order.
restype_1to3 = dict(zip(
    'ARNDCQEGHILKMFPSTWYV',
    ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
     'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'],
))
# Heavy-atom names for each residue in the fixed 14-slot ("atom14") layout:
# slots 0-3 are always the backbone atoms N, CA, C, O; the remaining slots
# hold side-chain atoms and are padded with '' so every residue has exactly
# 14 entries.  'UNK' is an all-empty placeholder for unknown residues.
restype_name_to_atom14_names = {
    'ALA': ['N', 'CA', 'C', 'O', 'CB', '', '', '', '', '', '', '', '', ''],
    'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2',
            '', '', ''],
    'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'ND2', '', '', '', '', '',
            ''],
    'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2', '', '', '', '', '',
            ''],
    'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG', '', '', '', '', '', '', '', ''],
    'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2', '', '', '',
            '', ''],
    'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2', '', '', '',
            '', ''],
    'GLY': ['N', 'CA', 'C', 'O', '', '', '', '', '', '', '', '', '', ''],
    'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND1', 'CD2', 'CE1', 'NE2', '',
            '', '', ''],
    'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1', '', '', '', '', '',
            ''],
    'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', '', '', '', '', '',
            ''],
    'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ', '', '', '', '',
            ''],
    'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE', '', '', '', '', '',
            ''],
    'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ',
            '', '', ''],
    'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', '', '', '', '', '', '', ''],
    'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG', '', '', '', '', '', '', '', ''],
    'THR': ['N', 'CA', 'C', 'O', 'CB', 'OG1', 'CG2', '', '', '', '', '', '',
            ''],
    'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'NE1', 'CE2', 'CE3',
            'CZ2', 'CZ3', 'CH2'],
    'TYR': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ',
            'OH', '', ''],
    'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', '', '', '', '', '', '',
            ''],
    'UNK': ['', '', '', '', '', '', '', '', '', '', '', '', '', ''],
}
# Number of side-chain chi (torsion) angles per residue type, keyed by the
# one-letter code in canonical restype order.
n_res_chi = dict(zip(
    'ARNDCQEGHILKMFPSTWYV',
    [0, 4, 2, 2, 1, 3, 3, 0, 2, 2, 2, 4, 3, 2, 2, 1, 1, 2, 2, 1],
))
# Three-letter -> one-letter residue-name mapping (inverse of restype_1to3),
# built from compact "NAME:letter" pairs.
restype_3to1 = dict(
    pair.split(':') for pair in
    'ALA:A CYS:C ASP:D GLU:E PHE:F GLY:G HIS:H ILE:I LYS:K LEU:L '
    'MET:M ASN:N PRO:P GLN:Q ARG:R SER:S THR:T VAL:V TRP:W TYR:Y'.split()
)
# Sign table for alternate chi-angle definitions, shape (20, 19), int16.
# For the four residue types whose terminal side-chain group is symmetric
# (ASP=3, GLU=6, PHE=13, TYR=18 in restype order) a pair of entries in the
# chi-related columns (offset 11+) is flipped to -1; all others stay +1.
res_angle_alt = np.ones((20, 19), dtype=np.int16)
res_angle_alt[3, 13:15] = -1   # ASP (columns 2+11, 3+11)
res_angle_alt[6, 15:17] = -1   # GLU (columns 4+11, 5+11)
res_angle_alt[13, 13:15] = -1  # PHE (columns 2+11, 3+11)
res_angle_alt[18, 13:15] = -1  # TYR (columns 2+11, 3+11)
# Per-residue element masks over the atom14 slots: atom_type_masks[r, e, s]
# is 1 when slot s of residue r holds an atom whose name starts with element
# e, where e indexes ('C', 'N', 'O', 'S').  Backbone 'C' and 'CA' are
# deliberately excluded; padding slots ('') stay 0.
atom_type_masks = np.zeros((20, 4, 14), dtype=int)
atom_type_one_hot = np.zeros((14, 4), dtype=int)  # defined alongside the masks; not used below
for res_name, atom_names in restype_name_to_atom14_names.items():
    if res_name == 'UNK':
        continue  # no atoms, leave the placeholder row all-zero
    res_id = restypes.index(restype_3to1[res_name])
    element_masks = []
    for element in ('C', 'N', 'O', 'S'):
        slot_mask = np.zeros(14, dtype=int)
        for slot, atom_name in enumerate(atom_names):
            if atom_name in ('C', 'CA'):
                continue  # skip backbone carbons
            if atom_name and atom_name[0] == element:
                slot_mask[slot] = 1
        element_masks.append(slot_mask)
    atom_type_masks[res_id] = np.array(element_masks)
| Python |
3D | antecede/EZSpecificity | Datasets/brenda.py | .py | 2,900 | 68 | import pytorch_lightning as pl
from torch_geometric.data import DataLoader
from torch_geometric.transforms import Compose
from rdkit import RDLogger
from Datasets.data_representer import get_representer
import Datasets.Structure.transforms as utils_trans
from Datasets.utils import read_datasets
# Silence RDKit's per-molecule parser warnings for this whole module.
RDLogger.DisableLog('rdApp.*')
class Singledataset(pl.LightningDataModule):
    """Lightning data module serving train/val/test enzyme-substrate splits.

    Reads the three split CSVs named in ``config.data`` and builds the graph
    featurization transforms once up front (training gets the noisy variant,
    evaluation the deterministic one).
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.batch_size = config.data.batch_size
        self.n_worker = config.num_cpus
        # Raw dataframes for the three splits.
        self.train_df = read_datasets(config.data.train_data_path)
        self.val_df = read_datasets(config.data.val_data_path)
        self.test_df = read_datasets(config.data.test_data_path)
        # Training transform may inject distance noise; eval transform never does.
        self.train_transform = self.get_transform()
        self.val_transform = self.get_transform(is_train=False)

    def get_transform(self, is_train=True):
        """Return the featurization pipeline, or None for the 'cpi' representer."""
        if self.config.data.representer == 'cpi':
            return None
        trans_cfg = self.config.transform
        return Compose([
            utils_trans.FeaturizeProteinAtom(),
            utils_trans.FeaturizeLigandAtom(),
            utils_trans.EdgeConnection(
                dist_noise=trans_cfg.dist_noise if is_train else False,
                cutoff=trans_cfg.cutoff,
                num_r_gaussian=trans_cfg.num_r_gaussian,
                k=trans_cfg.k,
            ),
        ])

    def _make_loader(self, df, transform, is_train):
        # Single place to build a DataLoader so the three hooks stay in sync;
        # only the training loader shuffles.
        dataset = get_representer(df=df, config=self.config,
                                  transform=transform, is_train=is_train)
        return DataLoader(dataset, batch_size=self.batch_size, shuffle=is_train,
                          num_workers=self.n_worker, follow_batch=['ligand_index'])

    def train_dataloader(self):
        return self._make_loader(self.train_df, self.train_transform, True)

    def val_dataloader(self):
        return self._make_loader(self.val_df, self.val_transform, False)

    def test_dataloader(self):
        return self._make_loader(self.test_df, self.val_transform, False)
# Manual smoke-test scaffolding kept from development: the commented lines
# below show how this module was exercised by hand (paths are machine-local).
if __name__ == "__main__":
    pass
    # from easydict import EasyDict
    # import yaml
    # import warnings
    # import warnings
    # warnings.filterwarnings("ignore")
    # df = pd.read_csv("/work/yufeng/2022/enzyme_specificity/data/data.csv", sep=',')
    # config = load_config("/work/yufeng/2022/enzyme_specificity/src/Configs/specificity.yml")
    # reaction = Reaction(df=df, st=0, ed=len(df.index), config=config)
    # dataloader = DataLoader(reaction, batch_size=2, shuffle=True, num_workers=20)
    # for _ in dataloader:
    #     print(_)
    #     break
    # print(len(reaction))
# print(reaction[0]) | Python |
3D | antecede/EZSpecificity | Datasets/create_features.py | .py | 27,473 | 629 | import sys
# Hard-coded cluster paths for data and the source checkout; sys.path is
# extended so that the `Datasets.*` imports below resolve when this file is
# run directly as a script.
data_root_dir = "/scratch/bbto/suyufeng/tmp/enzyme_specificity"
src_root_dir = "/projects/bbto/suyufeng/enzyme_specificity"
sys.path.append(f"{src_root_dir}/src")
import numpy as np
import torch
from tqdm import tqdm
from rdkit import Chem, RDLogger
import ray
import pandas as pd
import lmdb
import pickle
import esm
from Datasets.utils import parse_smile, get_neighbor_list, check_smile_equal, convert_protein_sequence_to_number
# Previously run as a Ray task; now executed via multiprocessing.Pool instead.
# @ray.remote(num_cpus=1, max_calls=1)
def get_reaction_feature_single(item):
    """Build substrate graph features for a single reaction record.

    ``item`` is a 4-tuple ``(full_reaction, substrate, right, substrate_only)``
    of SMILES strings plus a flag.  Returns ``(full_reaction, substrate, tag,
    data)`` where ``tag`` is an error/skip message (None on success) and
    ``data`` is the feature dict (or None when the substrate is unusable).
    """
    # disable warning (transformers + RDKit are chatty per molecule)
    from transformers import logging
    logging.set_verbosity_error()
    RDLogger.DisableLog('rdApp.*')
    # parameters
    # print(item)
    full_reaction, substrate, right, substrate_only = item
    # NOTE(review): the unpacked substrate_only flag is unconditionally
    # overridden here, so the rxnmapper atom-mapping path (commented out
    # below) is always skipped and only substrate-side features are
    # produced -- confirm this is still intended.
    substrate_only = True
    smiles = right.split(".")
    # Thresholds for the (currently disabled) atom-mapping quality checks.
    ratio_threshold=0.8
    miss_atom_threshold=2
    confident_threshold=0
    def generate_substrate_only_data(substrate_data):
        # Wrap a parsed substrate graph in the output schema with an all-zero
        # reaction-attention label (no product information available).
        data = {
            'element': substrate_data['element'],
            'edge_index': substrate_data['edge_index'],
            'edge_type': substrate_data['edge_type'],
            'atom_feature': substrate_data['atom_feature'],
            'reaction_attention_label': np.zeros((substrate_data['element'].shape[0])),
            'num_nodes': np.array(substrate_data['element'].shape[0])
        }
        return data
    # Drop product molecules that contain no heavy atoms (rxnmapper can't handle them).
    smile_temp = []
    for smile in smiles:
        mol = Chem.MolFromSmiles(smile)
        if mol is not None:
            mol = Chem.rdmolops.RemoveAllHs(mol)
            if mol.GetNumAtoms() > 0:
                smile_temp.append(smile)
    smiles = smile_temp
    # check valid of reaction
    # 1. substrate must contain at least one heavy atom and be parseable
    mol = Chem.MolFromSmiles(substrate)
    if mol is not None:
        mol = Chem.rdmolops.RemoveAllHs(mol)
        if mol.GetNumAtoms() == 0:
            return full_reaction, substrate, "Substrate is H+", None
    else:
        return full_reaction, substrate, f"Substrate {substrate} is not parseable", None
    # Product treated as unknown: the forced substrate_only flag above always
    # short-circuits here with substrate-only features.
    if substrate_only == True:
        return full_reaction, substrate, f"Product unknown", generate_substrate_only_data(parse_smile(substrate)[0])
    # The atom-mapping pipeline below is retained for reference but disabled.
    # # 2. product set empty
    # if len(smiles) == 0:
    #     return full_reaction, substrate, 'No valid product', generate_substrate_only_data(parse_smile(substrate)[0])
    # # get atom mapping via rxnmapper and create reaction attention label
    # try:
    #     from rxnmapper import RXNMapper
    #     rxn_mapper = RXNMapper()
    #     results = rxn_mapper.get_attention_guided_atom_maps([full_reaction])[0]
    # except Exception as e:
    #     return full_reaction, substrate, 'Error in rxnmapper: %s' % e, generate_substrate_only_data(parse_smile(substrate)[0])
    # if results['confidence'] < confident_threshold:
    #     return full_reaction, substrate, 'Atom mapping confidence is too low: %f' % results['confidence'], generate_substrate_only_data(parse_smile(substrate)[0])
    # left, right = results['mapped_rxn'].split('>>')
    # reaction_attention_label = None
    # for substrate_smile in left.split('.'):
    #     # Check reactant equals substrate
    #     if check_smile_equal(substrate_smile, substrate) == False:
    #         continue
    #     # Two dicts: m2s and s2m
    #     # m2s: rxn_id -> canonicalized id
    #     # s2m: canonicalized id -> rxn_id
    #     substrate_data, substrate_m2s, substrate_s2m, _ = parse_smile(substrate_smile)
    #     substrate_matched_index_dict = {}
    #     if reaction_attention_label is None:
    #         reaction_attention_label = np.zeros((substrate_data['element'].shape[0]))
    #     for product_smile in right.split("."):
    #         product_data, product_m2s, product_s2m, product_mol = parse_smile(product_smile)
    #         for atom in product_mol.GetAtoms():
    #             product_m_id = atom.GetAtomMapNum()
    #             if product_m_id in substrate_m2s and product_m_id != 0:
    #                 substrate_s_id = substrate_m2s[product_m_id]
    #                 substrate_matched_index_dict[substrate_s_id] = True
    #                 product_s_id = product_m2s[product_m_id]
    #                 if not np.equal(substrate_data['atom_feature'][substrate_s_id], product_data['atom_feature'][product_s_id]).any():
    #                     reaction_attention_label[substrate_s_id] = 1
    #                 else:
    #                     substarte_neighbor = get_neighbor_list(substrate_data, substrate_s_id, substrate_s2m)
    #                     product_neighbor = get_neighbor_list(product_data, product_s_id, product_s2m)
    #                     if set(substarte_neighbor) != set(product_neighbor):
    #                         reaction_attention_label[substrate_s_id] = 1
    # if reaction_attention_label is None:
    #     return full_reaction, substrate, 'No matched substrate', generate_substrate_only_data(parse_smile(substrate)[0])
    # for i in range(len(reaction_attention_label)):
    #     if substrate_data['element'][i] == 1:
    #         reaction_attention_label[i] = 0
    # if len(substrate_matched_index_dict) / substrate_data['element'].shape[0] < ratio_threshold and substrate_data['element'].shape[0] - len(substrate_matched_index_dict) > miss_atom_threshold:
    #     return full_reaction, substrate, "Atom mapping ratio is too low: %f" % (len(substrate_matched_index_dict) / substrate_data['element'].shape[0]), generate_substrate_only_data(parse_smile(substrate)[0])
    # # if np.sum(reaction_attention_label) == 0:
    # #     return full_reaction, substrate, "No atom changed in reaction", generate_substrate_only_data(parse_smile(substrate)[0])
    # data = {
    #     'element': substrate_data['element'],
    #     'edge_index': substrate_data['edge_index'],
    #     'edge_type': substrate_data['edge_type'],
    #     'atom_feature': substrate_data['atom_feature'],
    #     'reaction_attention_label': reaction_attention_label,
    #     'num_nodes': np.array(substrate_data['element'].shape[0])
    # }
    # return full_reaction, substrate, None, data
def to_iterator(obj_ids):
    """Yield Ray task results one at a time, in completion order."""
    remaining = obj_ids
    while remaining:
        finished, remaining = ray.wait(remaining)
        yield ray.get(finished[0])
def get_reaction_parameters_new_brenda(df_path, allow_no_match=False):
    """Collect unique reactions from a BRENDA-style CSV.

    Expects columns 'substrate' and 'reaction' (reaction SMILES of the form
    left>>right).  Substrates of 275+ characters are skipped.  Returns
    {reaction: (index, (reaction, substrate, products, allow_no_match))}.

    NOTE(review): this definition is shadowed by a later, one-argument
    function of the same name in this module.
    """
    df = pd.read_csv(df_path, sep=',').dropna(subset=['substrate', 'reaction'])
    parameters_dict = {}
    for substrate, reaction in zip(df['substrate'], df['reaction']):
        products = reaction.split(">>")[1]
        if reaction not in parameters_dict and len(substrate) < 275:
            # Fix: indices previously started at -1 (`len(parameters_dict) - 1`);
            # they now start at 0, consistent with get_reaction_parameters_df.
            parameters_dict[reaction] = (
                len(parameters_dict),
                (reaction, substrate, products, allow_no_match),
            )
    return parameters_dict
def get_reaction_parameters_brenda(df_path, allow_no_match=False):
    """Collect unique reactions from a legacy BRENDA CSV.

    Expects columns 'left', 'substrate' and 'right'; the reaction string is
    assembled as left>>right.  Substrates of 275+ characters are skipped.
    Returns {reaction: (index, (reaction, substrate, right, allow_no_match))}.
    """
    df = pd.read_csv(df_path, sep=',').dropna(subset=['substrate', 'left', 'right'])
    parameters_dict = {}
    for left, substrate, right in zip(df['left'], df['substrate'], df['right']):
        reaction = left + '>>' + right
        if reaction not in parameters_dict and len(substrate) < 275:
            # Fix: indices previously started at -1; now 0-based, consistent
            # with get_reaction_parameters_df.
            parameters_dict[reaction] = (
                len(parameters_dict),
                (reaction, substrate, right, allow_no_match),
            )
    return parameters_dict
def get_reaction_parameters_halogenase(df_path):
    """Collect unique halogenase reactions from a CSV with columns
    'Substrate0' and 'ReactionInLine' (left>>right SMILES).

    Returns {reaction: (index, (reaction, substrate, products, False))}.
    """
    df = pd.read_csv(df_path, sep=',').dropna(subset=['Substrate0', 'ReactionInLine'])
    parameters_dict = {}
    for substrate, reaction in zip(df['Substrate0'], df['ReactionInLine']):
        if reaction not in parameters_dict:
            # Fix: indices previously started at -1; now 0-based.
            parameters_dict[reaction] = (
                len(parameters_dict),
                (reaction, substrate, reaction.strip().split(">>")[1], False),
            )
    return parameters_dict
def get_reaction_parameters_new_brenda(df_path):
    """Build identity "reactions" (substrate>>substrate) from a CSV with a
    'Substrate SMILES' column, one entry per unique substrate.

    Returns {substrate: (index, (substrate>>substrate, substrate, substrate, False))}.

    NOTE(review): this redefinition shadows the earlier two-argument
    get_reaction_parameters_new_brenda above -- consider renaming one of them.
    """
    df = pd.read_csv(df_path, sep=',').dropna(subset=['Substrate SMILES'])
    parameters_dict = {}
    for substrate in df['Substrate SMILES']:
        if substrate not in parameters_dict:
            # Fix: indices previously started at -1; now 0-based.
            parameters_dict[substrate] = (
                len(parameters_dict),
                (substrate + ">>" + substrate, substrate, substrate, False),
            )
    return parameters_dict
def get_reaction_parameters_resume(df_path):
    """Rebuild the reaction -> (row index, parameters) map from a previously
    saved reaction CSV (columns 'substrates' and 'reactions'), with
    allow_no_match=False."""
    frame = pd.read_csv(df_path, sep=',')
    return {
        reaction: (row, (reaction, substrate, reaction.strip().split(">>")[1], False))
        for row, (substrate, reaction)
        in enumerate(zip(frame['substrates'], frame['reactions']))
    }
def get_reaction_parameters_halogenase_evaluation(df_path):
    """Build identity "reactions" for halogenase evaluation from a CSV with a
    'Canonical_SMILES' column, one entry per unique substrate, with
    allow_no_match=True.

    Returns {substrate: (index, (substrate>>substrate, substrate, substrate, True))}.
    """
    df = pd.read_csv(df_path, sep=',').dropna(subset=['Canonical_SMILES'])
    parameters_dict = {}
    for substrate in df['Canonical_SMILES']:
        if substrate not in parameters_dict:
            # Fix: indices previously started at -1; now 0-based.
            parameters_dict[substrate] = (
                len(parameters_dict),
                (substrate + ">>" + substrate, substrate, substrate, True),
            )
    return parameters_dict
def get_reaction_parameters_small_family(df_path):
    """Build identity "reactions" for the small-family datasets from a CSV
    with a 'SUBSTRATES' column, one entry per unique substrate, with
    allow_no_match=True.

    Returns {substrate: (index, (substrate>>substrate, substrate, substrate, True))}.
    """
    df = pd.read_csv(df_path, sep=',').dropna(subset=['SUBSTRATES'])
    parameters_dict = {}
    for substrate in df['SUBSTRATES']:
        if substrate not in parameters_dict:
            # Fix: indices previously started at -1; now 0-based.
            parameters_dict[substrate] = (
                len(parameters_dict),
                (substrate + ">>" + substrate, substrate, substrate, True),
            )
    return parameters_dict
def get_reaction_parameters_df(df_path, substrate_only=False):
    """Build identity "reactions" from a CSV with a 'substrates' column,
    one entry per unique substrate.

    Returns {substrate: (index, (substrate>>substrate, substrate, substrate,
    substrate_only))} with 0-based indices in first-seen order.
    """
    frame = pd.read_csv(df_path, sep=',')
    out = {}
    for smiles in frame['substrates']:
        if smiles in out:
            continue  # keep only the first occurrence
        out[smiles] = (len(out), (smiles + ">>" + smiles, smiles, smiles, substrate_only))
    return out
def get_reaction_parameters_resume_evaluation(df_path):
    """Rebuild the reaction -> (row index, parameters) map from a previously
    saved reaction CSV (columns 'substrates' and 'reactions'), with
    allow_no_match=True for evaluation runs."""
    frame = pd.read_csv(df_path, sep=',')
    return {
        reaction: (row, (reaction, substrate, reaction.strip().split(">>")[1], True))
        for row, (substrate, reaction)
        in enumerate(zip(frame['substrates'], frame['reactions']))
    }
def get_reaction_feature(parameters_dict, save_lmdb_path, save_df_path, allow_no_match=False, resume=False):
    """Featurize all reactions in ``parameters_dict`` in parallel and store
    the results in an LMDB database.

    ``parameters_dict`` maps reaction -> (index, parameters) as produced by
    the get_reaction_parameters_* helpers.  Records are written under
    sequential string keys; in ``resume`` mode the original indices from
    ``parameters_dict`` are reused as keys instead, and the reaction CSV is
    not rewritten.  ``allow_no_match`` also keeps substrate-only features
    whose mapping produced a non-None skip tag.
    """
    # ray.init(num_cpus=40, num_gpus=1)
    reactions = []
    substrates = []
    from multiprocessing.pool import Pool
    num_skipped = 0
    if resume:
        # reaction -> previously assigned LMDB key, recovered from the
        # (index, parameters) values.
        reaction_id_dict = {
            reaction: index for index, (reaction, substrate,_, _) in parameters_dict.values()
        }
    parameters = [parameter[1] for parameter in parameters_dict.values()]
    db = lmdb.open(
        save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    # Fan the per-reaction featurization out over 60 worker processes;
    # results arrive in completion order.
    with Pool(60) as pool:
        for full_reaction, substrate, tag, data in pool.imap_unordered(get_reaction_feature_single, parameters):
            if (data is not None and allow_no_match) or (tag is None):
                with db.begin(write=True, buffers=True) as txn:
                    if resume:
                        txn.put(
                            key = str(reaction_id_dict[full_reaction]).encode(),
                            value = pickle.dumps(data)
                        )
                    else:
                        # Key is the running count of accepted reactions, so
                        # keys align with row order in the CSV written below.
                        txn.put(
                            key = str(len(reactions)).encode(),
                            value = pickle.dumps(data)
                        )
                reactions.append(full_reaction)
                substrates.append(substrate)
            else:
                num_skipped += 1
                print('Skipped (%d) %s %s' % (num_skipped, full_reaction, tag))
    db.close()
    if not resume:
        data = {
            "reactions": reactions,
            "substrates": substrates
        }
        df = pd.DataFrame(data)
        df.to_csv(save_df_path, sep=',', index=False)
        print("Save reaction list")
    # NOTE(review): ray.init above is commented out but ray.shutdown() is
    # still called; harmless if Ray was never started, but confirm whether
    # the Ray path is fully retired.
    ray.shutdown()
def get_enzyme_feature_small_famlity(df_path, save_df_path, save_lmdb_path):
    """Compute ESM-2 embeddings for the enzymes of a small-family CSV.

    Reads unique enzymes (columns 'SEQ', 'Enzyme_ID', optional 'active_site')
    from ``df_path``, writes one pickled embedding record per enzyme into the
    LMDB at ``save_lmdb_path``, and saves the kept (sequence, uniprot) table
    to ``save_df_path``.  Sequences longer than 1000 residues are skipped.

    NOTE(review): function name typo ('famlity') kept for caller compatibility.
    """
    df = pd.read_csv(df_path, sep=',')
    env = lmdb.open(
        save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    # Fix: the ESM model was previously also loaded here (and moved to GPU)
    # even though generate_esm_embedding loads its own copy -- the redundant
    # load wasted GPU memory and its results were never used.
    data = []
    uniprot_dict = {}
    sequences = []
    uniprots = []
    if 'active_site' not in df.columns:
        # NOTE(review): the message says -1 but the column is filled with 1s
        # (np.ones_like); confirm which value downstream code expects.
        print("No active site information! Fill with -1")
        df['active_site'] = np.ones_like(df['Enzyme_ID']).astype(int)
    # TODO: download sequence from uniprot
    for index, (sequence, uniprot, active_site) in enumerate(zip(df['SEQ'].values, df['Enzyme_ID'].values, df['active_site'].values)):
        if uniprot in uniprot_dict:
            continue  # de-duplicate by enzyme id
        if len(sequence) > 1000:
            continue  # too long for the ESM context used here
        sequences.append(sequence)
        uniprots.append(uniprot)
        uniprot_dict[uniprot] = (len(uniprots) - 1, active_site)
        data.append((uniprot, sequence))
    generate_esm_embedding(env, data, uniprot_dict)
    data = {
        "sequences": sequences,
        "uniprots": uniprots
    }
    df = pd.DataFrame(data)
    df.to_csv(save_df_path, sep=',', index=False)
    print("Save enzyme df")
    env.close()
def generate_esm_embedding(env, data, uniprot_dict, save_enzyme_df=None, mean_only=False):
    """Embed (uniprot, sequence) pairs with ESM-2 650M and write them to LMDB.

    Each record is pickled under key ``str(uniprot_dict[uniprot][0])`` and
    contains the embedding, the active-site value ``uniprot_dict[uniprot][1]``
    and the numeric sequence encoding.  ``save_enzyme_df`` is currently unused.

    NOTE(review): the ``mean_only`` branches appear inverted -- when
    ``mean_only`` is True the full per-residue matrix is stored, and when it
    is False the mean over residues is stored.  Existing callers rely on this
    behaviour, so it is documented rather than changed; confirm the intent.
    """
    model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()
    model = model.to(torch.device("cuda:0"))
    batch_converter = alphabet.get_batch_converter()
    for small_data in tqdm(data, total=len(data)):
        # One sequence per forward pass to bound GPU memory.
        small_data = [small_data]
        atch_labels, batch_strs, batch_tokens = batch_converter(small_data)  # NOTE(review): 'atch_labels' typo (unused value)
        batch_tokens = batch_tokens.to(torch.device("cuda:0"))
        # Extract per-residue representations (on CPU)
        with torch.no_grad():
            results = model(batch_tokens, repr_layers=[33], return_contacts=True)
        token_representations = results["representations"][33].cpu()
        # Generate per-sequence representations via averaging
        # NOTE: token 0 is always a beginning-of-sequence token, so the first residue is token 1.
        for i, (uniprot, seq) in enumerate(small_data):
            with env.begin(write=True, buffers=True) as txn:
                if mean_only:
                    embedding = token_representations[i, 1:1+len(seq)].detach().numpy()
                else:
                    embedding = token_representations[i, 1:1+len(seq)].detach().numpy().mean(axis=0)
                # NOTE(review): this rebinding shadows the `data` parameter;
                # iteration is unaffected because tqdm captured it above.
                data = {
                    'embedding': embedding,
                    'active_site': uniprot_dict[uniprot][1],
                    'sequence': convert_protein_sequence_to_number(seq)
                }
                txn.put(key=str(uniprot_dict[uniprot][0]).encode(), value=pickle.dumps(data))
        batch_tokens = batch_tokens.to(torch.device("cpu"))
def get_enzyme_feature_from_df(enzyme_df_path, save_lmdb_path, duplicate=False, save_enzyme_df=None):
    """Embed unique enzyme sequences from a CSV into an LMDB store.

    Expects columns 'sequences' and 'uniprots'.  Sequences longer than 1000
    residues or containing non-standard amino acids are skipped.  When
    ``duplicate`` is True, repeated uniprot ids are kept under their row index
    instead of being dropped.  If ``save_enzyme_df`` is given, the kept
    (sequence, uniprot) table is also written there as CSV.
    """
    df = pd.read_csv(enzyme_df_path, sep=',')
    env = lmdb.open(
        save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    data = []
    sequences = []
    uniprots = []
    uniprot_dict = {}
    for index, (sequence, uniprot) in enumerate(zip(df["sequences"], df["uniprots"])):
        if uniprot in uniprot_dict:
            if duplicate:
                # Keep the duplicate under a unique, per-row key.
                uniprot = str(index)
            else:
                print(uniprot, "already in lmdb")
                continue
        if len(sequence) > 1000:
            print(uniprot, "sequence too long")
            continue
        try:
            convert_protein_sequence_to_number(sequence)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            print(uniprot, "sequence contain non-standard amino acid")
            continue
        sequences.append(sequence)
        uniprots.append(uniprot)
        uniprot_dict[uniprot] = (len(uniprot_dict), 1)
        data.append((uniprot, sequence))
    generate_esm_embedding(env, data, uniprot_dict)
    env.close()
    if save_enzyme_df is not None:
        data = {
            "sequences": sequences,
            "uniprots": uniprots
        }
        df = pd.DataFrame(data)
        df.to_csv(save_enzyme_df, sep=',', index=False)
        print("Save enzyme df")
def get_enzyme_feature_for_new_brenda(df_path, save_df_path, save_lmdb_path):
    """Embed unique enzymes from a UniProt-style CSV (columns 'Sequence',
    'Entry', optional 'active_site') into an LMDB store and write the kept
    (sequence, uniprot) table to ``save_df_path``.

    Sequences longer than 1000 residues are skipped; enzymes are
    de-duplicated by their 'Entry' id.
    """
    df = pd.read_csv(df_path, sep=',')
    env = lmdb.open(
        save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    data = []
    uniprot_dict = {}
    sequences = []
    uniprots = []
    if 'active_site' not in df.columns:
        # NOTE(review): the message says -1 but the column is filled with 1s
        # (np.ones_like); confirm which value downstream code expects.
        print("No active site information! Fill with -1")
        df['active_site'] = np.ones_like(df['Entry']).astype(int)
    for index, (sequence, uniprot, active_site) in enumerate(zip(df['Sequence'].values, df['Entry'].values, df['active_site'].values)):
        if uniprot in uniprot_dict:
            continue  # de-duplicate by uniprot entry
        if len(sequence) > 1000:
            continue  # too long for the ESM context used here
        sequences.append(sequence)
        uniprots.append(uniprot)
        uniprot_dict[uniprot] = (len(uniprots) - 1, active_site)
        data.append((uniprot, sequence))
    generate_esm_embedding(env, data, uniprot_dict)
    data = {
        "sequences": sequences,
        "uniprots": uniprots
    }
    df = pd.DataFrame(data)
    df.to_csv(save_df_path, sep=',', index=False)
    print("Save enzyme df")
    # break
    env.close()
def get_enzyme_feature(df_path, save_df_path, save_lmdb_path):
    """Embed unique enzymes from a reaction CSV (columns 'sequence',
    'uniprot', optional 'active_site') into an LMDB store and write the kept
    (sequence, uniprot) table to ``save_df_path``.

    Sequences longer than 1000 residues are skipped; enzymes are
    de-duplicated by uniprot id.
    """
    # Fix: the CSV was previously read twice -- first with
    # dropna(subset=['substrate', 'left', 'right']) and immediately again
    # without it, discarding the first result.  Only the plain read was ever
    # used, so the dead first read is removed.
    env = lmdb.open(
        save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    df = pd.read_csv(df_path, sep=',')
    data = []
    uniprot_dict = {}
    sequences = []
    uniprots = []
    if 'active_site' not in df.columns:
        # NOTE(review): the message says -1 but the column is filled with 1s
        # (np.ones_like); confirm which value downstream code expects.
        print("No active site information! Fill with -1")
        df['active_site'] = np.ones_like(df['uniprot']).astype(int)
    # TODO: download sequence from uniprot
    for index, (sequence, uniprot, active_site) in enumerate(zip(df['sequence'].values, df['uniprot'].values, df['active_site'].values)):
        if uniprot in uniprot_dict:
            continue  # de-duplicate by uniprot id
        if len(sequence) > 1000:
            continue  # too long for the ESM context used here
        sequences.append(sequence)
        uniprots.append(uniprot)
        uniprot_dict[uniprot] = (len(uniprots) - 1, active_site)
        data.append((uniprot, sequence))
    generate_esm_embedding(env, data, uniprot_dict)
    data = {
        "sequences": sequences,
        "uniprots": uniprots
    }
    df = pd.DataFrame(data)
    df.to_csv(save_df_path, sep=',', index=False)
    print("Save enzyme df")
    env.close()
def get_enzyme_fature_tmap(fasta_path, save_lmdb_path):
    """Embed up to 200k unique UniProt sequences from a FASTA file into LMDB,
    storing per-residue ESM-2 representations (mean_only=True path).

    FASTA record ids are assumed to follow the UniProt 'db|ACCESSION|name'
    layout -- the accession between the first two '|' is used as the key.
    NOTE(review): function name typo ('fature') kept for caller compatibility.
    """
    # disable warning
    from transformers import logging
    from Bio import SeqIO
    logging.set_verbosity_error()
    RDLogger.DisableLog('rdApp.*')
    env = lmdb.open(
        save_lmdb_path,
        map_size=600*(1024*1024*1024), # 600GB
        create=True,
        subdir=False,
        readonly=False, # Writable
    )
    data = []
    uniprot_dict = {}
    records = list(SeqIO.parse(fasta_path, "fasta"))
    for record in records[:200000]:
        uniprot = record.id.split("|")[1]
        sequence = str(record.seq)
        if uniprot in uniprot_dict:
            continue  # de-duplicate by accession
        if len(sequence) > 1000:
            continue  # too long for the ESM context used here
        data.append((uniprot, sequence))
        # Fix: generate_esm_embedding subscripts uniprot_dict[uniprot][0]
        # (LMDB key) and [1] (active_site); the previous bare-int value raised
        # TypeError on the first write.  Use the (index, active_site) tuple
        # schema like every other caller, with placeholder active_site 1.
        uniprot_dict[uniprot] = (len(uniprot_dict), 1)
    generate_esm_embedding(env, data, uniprot_dict, mean_only=True)
    env.close()
if __name__ == "__main__":
    # Scratchpad driver: each commented-out section below is a one-off
    # feature-generation job that was run manually at some point. Uncomment
    # the section you need and run this file; only the tmap job is live.
    # Check get_reaction_feature_single function
    # object = get_reaction_feature_single.remote(('CC1(O)CC(=O)OC(O)C1.N=C(O)C1=CN([C@@H]2O[C@H](COP(=O)(O)OP(=O)(O)OC[C@H]3O[C@@H](n4cnc5c(N)ncnc54)[C@H](OP(=O)(O)O)[C@@H]3O)[C@@H](O)[C@H]2O)C=CC1>>N=C(O)c1ccc[n+]([C@@H]2O[C@H](COP(=O)(O)OP(=O)(O)OC[C@H]3O[C@@H](n4cnc5c(N)ncnc54)[C@H](OP(=O)(O)O)[C@@H]3O)[C@@H](O)[C@H]2O)c1',
    #                                              'CC1(O)CC(=O)OC(O)C1',
    #                                              'N=C(O)c1ccc[n+]([C@@H]2O[C@H](COP(=O)(O)OP(=O)(O)OC[C@H]3O[C@@H](n4cnc5c(N)ncnc54)[C@H](OP(=O)(O)O)[C@@H]3O)[C@@H](O)[C@H]2O)c1'))
    # print(ray.get(object))
    # Create reaction features
    # specificity (full)
    # save_df_path = f"{root_dir}/data/new_brenda/reaction.csv"
    # save_lmdb_path = f"{root_dir}/data/new_brenda/reaction_features.lmdb"
    # input_path = f"{root_dir}/data/new_brenda/data.csv"
    # parameters_dict = get_reaction_parameters_new_brenda(df_path=input_path)
    # get_reaction_feature(parameters_dict=parameters_dict, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path, allow_no_match=True)
    # selectivity (available reaction)
    # save_df_path = f"{root_dir}/data/reaction.csv"
    # save_lmdb_path = f"{root_dir}/data/reaction_features.lmdb"
    # input_path = f"{root_dir}/data/brenda/reduced_data.csv"
    # parameters_dict = get_reaction_parameters_brenda(df_path=input_path)
    # get_reaction_feature(parameters_dict=parameters_dict, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path, allow_no_match=True)
    # Check generated reaction features
    # save_lmdb_path = f"{root_dir}/data/halogenase/reaction_features.lmdb"
    # db = lmdb.open(
    #     save_lmdb_path,
    #     map_size=10*(1024*1024*1024), # 10GB
    #     create=False,
    #     subdir=False,
    #     readonly=True,
    #     lock=False,
    #     readahead=False,
    #     meminit=False,
    # )
    # with db.begin() as txn:
    #     keys = list(txn.cursor().iternext(values=False))
    # for key in keys:
    #     with db.begin(write=False, buffers=True) as txn:
    #         # key = str(key).encode()
    #         value = txn.get(key)
    #         if value is None:
    #             raise KeyError
    #         print(key.decode())
    #         print(pickle.loads(value))
    #         break
    # Create enzyme features
    # save_df_path = f"{root_dir}/data/brenda/enzymes.csv"
    # save_lmdb_path = f"{root_dir}/data/brenda/enzyme_features.lmdb"
    # input_path = f"{root_dir}/data/brenda/data_cofactor.csv"
    # get_enzyme_feature(df_path=input_path, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path)
    # Check generated enzyme features
    # save_lmdb_path = f"{root_dir}/data/enzyme_features.lmdb"
    # db = lmdb.open(
    #     save_lmdb_path,
    #     map_size=10*(1024*1024*1024), # 10GB
    #     create=False,
    #     subdir=False,
    #     readonly=True,
    #     lock=False,
    #     readahead=False,
    #     meminit=False,
    # )
    # with db.begin() as txn:
    #     keys = list(txn.cursor().iternext(values=False))
    # print(keys)
    # Create enzyme features for small family dataset
    # for enzyme_tag in ["Duf", "Esterase", "Phosphatase", "Gt_acceptor", "Nitrilase", "Thiolase"]:
    # for enzyme_tag in ["halogenase"]:
    #     save_df_path = f"{root_dir}/data/small_family/{enzyme_tag}/enzymes.csv"
    #     save_lmdb_path = f"{root_dir}/data/small_family/{enzyme_tag}/enzyme_features.lmdb"
    #     input_path = f"{root_dir}/data/small_family/{enzyme_tag}/data.csv"
    #     get_enzyme_feature_small_famlity(df_path=input_path, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path)
    # Create enzyme feature for new Brenda dataset
    # save_df_path = f"{root_dir}/data/small_family/2023_brenda/enzymes.csv"
    # save_lmdb_path = f"{root_dir}/data/small_family/2023_brenda/enzyme_features.lmdb"
    # input_path = f"{root_dir}/data/small_family/2023_brenda/data.csv"
    # get_enzyme_feature_for_new_brenda(df_path=input_path, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path)
    # Create enzyme feature from prepared enzyme df
    # save_enzyme_df = f"{root_dir}/data/small_family/Methyltransferse/enzymes_new.csv"
    # get_enzyme_feature_from_df(enzyme_df_path=f"{root_dir}/data/small_family/Methyltransferse/enzymes.csv", save_lmdb_path=f"{root_dir}/data/small_family/Methyltransferse/enzyme_features.lmdb", duplicate=True, save_enzyme_df=save_enzyme_df)
    # Create esm embedding for universal protein
    # NOTE(review): the only live job — paths are machine-specific; adjust
    # before running on another host.
    get_enzyme_fature_tmap(fasta_path=f"/scratch/bcao/suyufeng/uniprot_sprot.fasta", save_lmdb_path=f"/scratch/bcao/suyufeng/uniprot_features.lmdb")
    # Create reaction features for prepared reaction df
    # save_lmdb_path = f"{root_dir}/data/small_family/Methyltransferse/reaction_features.lmdb"
    # data_path = f"{root_dir}/data/small_family/Methyltransferse/reactions.csv"
    # parameters = get_reaction_parameters_df(data_path, substrate_only=True)
    # get_reaction_feature(parameters_dict=parameters, save_lmdb_path=save_lmdb_path, save_df_path=None, allow_no_match=True, resume=True)
    # Create reaction features for halogenase
    # df_path = f"{root_dir}/data/small_family/halogenase/data.csv"
    # save_lmdb_path = f"{root_dir}/data/small_family/halogenase/reaction_features.lmdb"
    # save_df_path = f"{root_dir}/data/small_family/halogenase/reactions.csv"
    # parameters = get_reaction_parameters_halogenase(df_path)
    # get_reaction_feature(parameters_dict=parameters, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path, allow_no_match=False)
    # Create reaction features for halogenase evaluation dataset
    # save_lmdb_path = f"{root_dir}/data/halogenase/evaluation/reaction_features.lmdb"
    # save_df_path = f"{root_dir}/data/halogenase/evaluation/reactions.csv"
    # # parameters = get_reaction_parameters_halogenase_evaluation(f"{root_dir}/data/halogenase/evaluation/datas.csv")
    # parameters = get_reaction_parameters_resume_evaluation(f"{root_dir}/data/halogenase/evaluation/reactions.csv")
    # get_reaction_feature(parameters_dict=parameters, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path, allow_no_match=True, resume=True)
    # Create reaction features for small family dataset
    # for enzyme_tag in ["Duf", "Esterase", "Phosphatase", "Gt_acceptor", "Nitrilase", "Thiolase"]:
    #     df_path = f"{root_dir}/data/small_family/{enzyme_tag}/data.csv"
    #     save_lmdb_path = f"{root_dir}/data/small_family/{enzyme_tag}/reaction_features.lmdb"
    #     save_df_path = f"{root_dir}/data/small_family/{enzyme_tag}/reactions.csv"
    #     parameters = get_reaction_parameters_small_family(df_path)
    #     get_reaction_feature(parameters_dict=parameters, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path, allow_no_match=True, resume=False)
    # Create reaction features for new Brenda
    # df_path = f"{data_root_dir}/data/small_family/2023_brenda/data.csv"
    # save_lmdb_path = f"{data_root_dir}/data/small_family/2023_brenda/reaction_features.lmdb"
    # save_df_path = f"{data_root_dir}/data/small_family/2023_brenda/reactions.csv"
    # parameters = get_reaction_parameters_new_brenda(df_path)
    # get_reaction_feature(parameters_dict=parameters, save_lmdb_path=save_lmdb_path, save_df_path=save_df_path, allow_no_match=True, resume=False)
    pass
3D | antecede/EZSpecificity | Datasets/Experiment/experimental_evaluate_preprocess.ipynb | .ipynb | 22,721 | 660 | {
"cells": [
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"root_dir = \"/projects/bbhh/suyufeng/enzyme_specificity\""
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create substrate embedding"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"1. grover embedding"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 89/89 [00:00<00:00, 5743.06it/s]\n"
]
},
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\n",
"# Grover embedding:\n",
"import os\n",
"# 1. Get npz feature\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/halogenase/evaluation/reactions.csv\", sep=',')\n",
"results = [smile for smile in df['substrates']]\n",
"data = {\n",
" \"substrates\": results\n",
"}\n",
"data = pd.DataFrame(data)\n",
"data.to_csv(f\"{root_dir}/data/halogenase/evaluation/substrates.csv\", index=False)\n",
"\n",
"\n",
"os.system(f\"python {root_dir}/src/other_softwares/grover_software/scripts/save_features.py --data_path {root_dir}/data/halogenase/evaluation/substrates.csv \\\n",
" --save_path {root_dir}/data/halogenase/evaluation/substrates.npz \\\n",
" --features_generator fgtasklabel \\\n",
" --restart\")"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Building atom vocab from file: /work/yufeng/2022/enzyme_specificity/data/halogenase/evaluation/substrates.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"50000it [00:00, 125271.16it/s] \n",
" 0%| | 0/90 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"atom vocab size 79\n",
"Building bond vocab from file: /work/yufeng/2022/enzyme_specificity/data/halogenase/evaluation/substrates.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"50000it [00:00, 131732.14it/s] \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"bond vocab size 78\n"
]
},
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 2. Get build vocab\n",
"os.system(f\"python {root_dir}/src/other_softwares/grover_software/scripts/build_vocab.py --data_path {root_dir}/data/halogenase/evaluation/substrates.csv \\\n",
" --vocab_save_folder {root_dir}/data/halogenase/evaluation/grover_vocab \\\n",
" --dataset_name halogenase\")"
]
},
{
"cell_type": "code",
"execution_count": 59,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CUDA_VISIBLE_DEVICES=0 python main.py fingerprint --data_path /work/yufeng/2022/enzyme_specificity/data/halogenase/evaluation/substrates.csv --features_path /work/yufeng/2022/enzyme_specificity/data/halogenase/evaluation/substrates.npz --checkpoint_path /work/yufeng/2022/enzyme_specificity/data/pretrain_model/grover_large.pt --fingerprint_source both --output /work/yufeng/2022/enzyme_specificity/data/halogenase/evaluation/fingerprint.npz --save_lmdb_path /work/yufeng/2022/enzyme_specificity/data/halogenase/evaluation/grover_fingerprint.lmdb --fingerprint_source both\n"
]
}
],
"source": [
"# 3. Get fingerprint\n",
"print(f\"CUDA_VISIBLE_DEVICES=0 python main.py fingerprint --data_path {root_dir}/data/halogenase/evaluation/substrates.csv --features_path {root_dir}/data/halogenase/evaluation/substrates.npz --checkpoint_path {root_dir}/data/pretrain_model/grover_large.pt --fingerprint_source both --output {root_dir}/data/halogenase/evaluation/fingerprint.npz --save_lmdb_path {root_dir}/data/halogenase/evaluation/grover_fingerprint.lmdb --fingerprint_source both\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
        "2. Morgan fingerprint"
]
},
{
"cell_type": "code",
"execution_count": 60,
"metadata": {},
"outputs": [],
"source": [
"from rdkit.Chem import AllChem\n",
"from rdkit import Chem\n",
"import numpy as np\n",
"path = f\"{root_dir}/data/halogenase/evaluation/substrates.csv\"\n",
"df = pd.read_csv(path, sep=',')\n",
"results = []\n",
"for smile in df['substrates']:\n",
" m1 = Chem.MolFromSmiles(smile)\n",
" result = np.array(AllChem.GetMorganFingerprintAsBitVect(m1,2,nBits=1024))\n",
" results.append(result)\n",
"np.save(f\"{root_dir}/data/halogenase/evaluation/morgan_fingerprint.npy\", np.array(results))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"3. Check correctness"
]
},
{
"cell_type": "code",
"execution_count": 34,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor(0.0003)\n"
]
}
],
"source": [
"import torch\n",
"import lmdb\n",
"import pickle\n",
"exp_reaction_db = lmdb.open(\n",
" f\"{root_dir}/data/halogenase/grover_fingerprint.lmdb\",\n",
" map_size=600*(1024*1024*1024), # 600GB\n",
" create=False,\n",
" subdir=False,\n",
" readonly=True,\n",
" lock=False,\n",
" readahead=False,\n",
" meminit=False,\n",
")\n",
"reaction_db = lmdb.open(\n",
" f\"{root_dir}/data/halogenase/evaluation/grover_fingerprint.lmdb\",\n",
" map_size=600*(1024*1024*1024), # 600GB\n",
" create=False,\n",
" subdir=False,\n",
" readonly=True,\n",
" lock=False,\n",
" readahead=False,\n",
" meminit=False,\n",
")\n",
"with exp_reaction_db.begin(write=False) as txn:\n",
" key = str(0).encode()\n",
" value = txn.get(key)\n",
" grover_data = pickle.loads(value)\n",
" a1 = torch.from_numpy(grover_data['total_embedding'][np.newaxis, :])\n",
"with reaction_db.begin(write=False) as txn:\n",
" key = str(93).encode()\n",
" value = txn.get(key)\n",
" grover_data = pickle.loads(value)\n",
" a2 = torch.from_numpy(grover_data['total_embedding'][np.newaxis, :])\n",
"\n",
"print(torch.sum(torch.abs(a1-a2)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create data df"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import os\n",
"root_dir = \"/projects/bbto/suyufeng/enzyme_specificity\"\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/small_family/experiment/datas.csv\", sep=',')\n",
"reaction_df = pd.read_csv(f\"{root_dir}/data/small_family/experiment/substrates.csv\", sep=',')\n",
"reaction_dict = {reaction: index for index, reaction in enumerate(reaction_df['substrates'].values)}\n",
"\n",
"enzyme_df = pd.read_csv(f\"{root_dir}/data/small_family/experiment/enzymes.csv\", sep=',')\n",
"\n",
"# def get_label(datas):\n",
"# label = 2\n",
"# for data in datas:\n",
"# data = str(data)\n",
"# if data == '1':\n",
"# label = 1\n",
"# if data == '0' and label == 2:\n",
"# label = 0\n",
"# return label\n",
"\n",
"data = {\n",
" \"reaction\": [],\n",
" \"enzyme\": [],\n",
" \"label\": [],\n",
" \"index\": []\n",
"}\n",
"\n",
"detailed = {\n",
" \"reaction\": [],\n",
" \"enzyme\": [],\n",
" \"label\": [],\n",
" \"index\": []\n",
"}\n",
"# print(df[\"P95480 (cl)\"][13])\n",
"\n",
"data_cnt = 0\n",
"for index, (smile, substrate_i) in enumerate(zip(df['Canonical_SMILES'], df['Substrate_Libarary'])):\n",
" sub_index = reaction_dict[smile]\n",
" for enzyme_index, enzyme_tag in enumerate(enzyme_df['uniprots']):\n",
" # datas = []\n",
" # for colume in enzmye_column[enzyme]:\n",
" # datas.append(df[colume][index])\n",
" # label = get_label(datas)\n",
" label = 0\n",
" if label != 2:\n",
" data[\"reaction\"].append(sub_index)\n",
" data[\"enzyme\"].append(enzyme_index)\n",
" data[\"label\"].append(label)\n",
" data[\"index\"].append(data_cnt)\n",
" \n",
" detailed[\"reaction\"].append(substrate_i)\n",
" detailed[\"enzyme\"].append(enzyme_tag)\n",
" detailed[\"label\"].append(label)\n",
" detailed[\"index\"].append(data_cnt)\n",
"\n",
" os.system(f\"cp {root_dir}/data/small_family/experiment/structure/af2/by_affinity/{enzyme_tag}_{substrate_i[-3:]}.pdb {root_dir}/data/small_family/experiment/structure/af2/structure/{data_cnt}.pdb\")\n",
" data_cnt += 1\n",
"\n",
"data = pd.DataFrame(data)\n",
"detailed = pd.DataFrame(detailed)\n",
"\n",
"data.to_csv(f\"{root_dir}/data/small_family/experiment/big_datas.csv\", index=False)\n",
"detailed.to_csv(f\"{root_dir}/data/small_family/experiment/detailed_datas.csv\", index=False)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Prepare for structure"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 744/744 [00:05<00:00, 131.21it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"<rdkit.Chem.rdchem.Mol object at 0x7fe32cab7680>\n",
"0\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
}
],
"source": [
"from Bio.PDB import *\n",
"from rdkit import Chem\n",
"from tqdm import tqdm\n",
"import glob\n",
"import os\n",
"\n",
"def distance(x1, y1, z1, x2, y2, z2):\n",
" return (1. * (x2 - x1) ** 2 + 1. * (y2 - y1) ** 2 + 1. * (z2 - z1) ** 2) ** 0.5\n",
"\n",
"bad_molecular = 0\n",
"for name in tqdm(glob.glob(f\"{root_dir}/data/small_family/experiment/structure/af2/structure/*.pdb\")):\n",
" id = int(name.split(\"/\")[-1].split(\".pdb\")[0])\n",
" pocket_out_path = f\"{root_dir}/data/small_family/experiment/structure/af2/pocket/{id}.pdb\"\n",
" ligand_out_path = f\"{root_dir}/data/small_family/experiment/structure/af2/raw_ligand/{id}.sdf\"\n",
" lines = []\n",
" ed = 0\n",
" for index, line in enumerate(open(name, \"r\")):\n",
" lines.append(line)\n",
" if \"COMPND\" in line:\n",
" ed = index\n",
" \n",
" protein_lines = lines[:ed]\n",
" ligand_lines = lines[ed+1:]\n",
"\n",
"\n",
" mol = Chem.MolFromPDBBlock(\"\".join(ligand_lines),\n",
" sanitize=False,\n",
" removeHs=True)\n",
" ligand_coords = []\n",
" # mol = Chem.RemoveHs(mol, sanitize=False)\n",
" for i, atom in enumerate(mol.GetAtoms()):\n",
" positions = mol.GetConformer().GetAtomPosition(i)\n",
" ligand_coords.append((positions.x, positions.y, positions.z))\n",
" \n",
" try:\n",
" writer = Chem.SDWriter(ligand_out_path)\n",
" writer.write(mol, confId=0)\n",
" except:\n",
" bad_molecular += 1\n",
" continue\n",
"\n",
" fin = open(pocket_out_path, \"w\")\n",
"\n",
" for line in protein_lines:\n",
" if \"ATOM\" in line:\n",
" try:\n",
" x = float(line[30:38])\n",
" y = float(line[38:46])\n",
" z = float(line[46:54])\n",
" for ligand_coord in ligand_coords:\n",
" if 'H' not in line[12:16].strip() and distance(x, y, z, ligand_coord[0], ligand_coord[1], ligand_coord[2]) < 10:\n",
" fin.write(line)\n",
" break\n",
" except:\n",
" continue\n",
" \n",
" elif \"HETATM\" in line or \"ENDMDL\" in line:\n",
" fin.write(line)\n",
" fin.close()\n",
"print(bad_molecular)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"713\n",
"718\n",
"716\n",
"717\n",
"714\n",
"712\n",
"715\n",
"719\n",
"8\n"
]
}
],
"source": [
"from rdkit.Chem import rdFMCS\n",
"from rdkit.Chem import Draw\n",
"from rdkit.Chem import AllChem,rdDepictor\n",
"from rdkit import Chem\n",
"import pandas as pd\n",
"import glob\n",
"from rdkit import RDLogger\n",
"\n",
"RDLogger.DisableLog('rdApp.*')\n",
"\n",
"def AssignBondOrdersFromTemplate(refmol, mol):\n",
" \"\"\" assigns bond orders to a molecule based on the\n",
" bond orders in a template molecule\n",
" Revised from AllChem.AssignBondOrderFromTemplate(refmol, mol)\n",
" \"\"\"\n",
" AllChem.AssignBondOrdersFromTemplate\n",
" refmol2 = Chem.rdchem.Mol(refmol)\n",
" mol2 = Chem.rdchem.Mol(mol)\n",
" # do the molecules match already?\n",
" matching = mol2.GetSubstructMatch(refmol2)\n",
" if not matching: # no, they don't match\n",
" # check if bonds of mol are SINGLE\n",
" for b in mol2.GetBonds():\n",
" if b.GetBondType() != Chem.BondType.SINGLE:\n",
" b.SetBondType(Chem.BondType.SINGLE)\n",
" b.SetIsAromatic(False)\n",
" # set the bonds of mol to SINGLE\n",
" for b in refmol2.GetBonds():\n",
" b.SetBondType(Chem.BondType.SINGLE)\n",
" b.SetIsAromatic(False)\n",
" # set atom charges to zero;\n",
" for a in refmol2.GetAtoms():\n",
" a.SetFormalCharge(0)\n",
" for a in mol2.GetAtoms():\n",
" a.SetFormalCharge(0)\n",
"\n",
" matching = mol2.GetSubstructMatches(refmol2, uniquify=False)\n",
" # do the molecules match now?\n",
" if matching:\n",
" if len(matching) > 1:\n",
" #logger.warning(\"More than one matching pattern found - picking one\")\n",
" pass\n",
" matchings=matching[:]\n",
" for matching in matchings:\n",
" #matching = matching[0] ## use each matching\n",
" # apply matching: set bond properties\n",
" for b in refmol.GetBonds():\n",
" atom1 = matching[b.GetBeginAtomIdx()]\n",
" atom2 = matching[b.GetEndAtomIdx()]\n",
" b2 = mol2.GetBondBetweenAtoms(atom1, atom2)\n",
" b2.SetBondType(b.GetBondType())\n",
" b2.SetIsAromatic(b.GetIsAromatic())\n",
" # apply matching: set atom properties\n",
" for a in refmol.GetAtoms():\n",
" a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])\n",
" a2.SetHybridization(a.GetHybridization())\n",
" a2.SetIsAromatic(a.GetIsAromatic())\n",
" a2.SetNumExplicitHs(a.GetNumExplicitHs())\n",
" a2.SetFormalCharge(a.GetFormalCharge())\n",
" try:\n",
" Chem.SanitizeMol(mol2)\n",
" if hasattr(mol2, '__sssAtoms'):\n",
" mol2.__sssAtoms = None # we don't want all bonds highlighted\n",
" break\n",
" except ValueError:\n",
" pass\n",
" # print(\"More than one matching pattern, Fail at this matching. Try next.\")\n",
" else:\n",
" raise ValueError(\"No matching found\")\n",
" return mol2\n",
"\n",
"def alignment_number_system(sdf, smile_mol):\n",
" \n",
" template = smile_mol\n",
" query = sdf\n",
" \n",
" # print(Chem.CanonSmiles(Chem.MolToSmiles(template)))\n",
" # print(Chem.CanonSmiles(Chem.MolToSmiles(query)))\n",
"\n",
"\n",
" mcs = rdFMCS.FindMCS([template, query])\n",
" patt = Chem.MolFromSmarts(mcs.smartsString)\n",
"\n",
" query_match = query.GetSubstructMatch(patt)\n",
" template_match = template.GetSubstructMatch(patt)\n",
"\n",
" result = [-1] * query.GetNumAtoms()\n",
"\n",
" for query_atom_id, template_atom_id in zip(query_match, template_match):\n",
" result[query_atom_id] = template_atom_id\n",
"\n",
" # Check if there is any atom not matched\n",
" for atom in query.GetAtoms():\n",
" assert atom.GetAtomicNum() == 1 or result[atom.GetIdx()] != -1\n",
"\n",
" return result\n",
"\n",
"def assign_idx(mol, idxs):\n",
" for atom, idx in zip(mol.GetAtoms(), idxs):\n",
" atom.SetAtomMapNum(idx)\n",
" return mol\n",
"\n",
"def mol_get_atomic_number(mol, atom_map=False):\n",
" result = [0] * mol.GetNumAtoms()\n",
" for atom in mol.GetAtoms():\n",
" if atom.GetAtomMapNum() != -1:\n",
" if atom_map:\n",
" result[atom.GetAtomMapNum()] = atom.GetAtomicNum()\n",
" else:\n",
" result[atom.GetIdx()] = atom.GetAtomicNum()\n",
" return result\n",
"\n",
"def check(mol, mol2):\n",
" for atom in mol.GetAtoms():\n",
" if atom.GetAtomMapNum() != -1:\n",
" id = atom.GetAtomMapNum()\n",
" \n",
" atom2 = mol2.GetAtomWithIdx(id)\n",
" if atom.GetAtomicNum() != atom2.GetAtomicNum():\n",
" return False\n",
" return True\n",
"\n",
"def view_difference(mol1, mol2):\n",
" mcs = rdFMCS.FindMCS([mol1,mol2])\n",
" mcs_mol = Chem.MolFromSmarts(mcs.smartsString)\n",
" match1 = mol1.GetSubstructMatch(mcs_mol)\n",
" target_atm1 = []\n",
" for atom in mol1.GetAtoms():\n",
" if atom.GetIdx() not in match1:\n",
" target_atm1.append(atom.GetIdx())\n",
" match2 = mol2.GetSubstructMatch(mcs_mol)\n",
" target_atm2 = []\n",
" for atom in mol2.GetAtoms():\n",
" if atom.GetIdx() not in match2:\n",
" target_atm2.append(atom.GetIdx())\n",
" return Draw.MolsToGridImage([mol1, mol2],highlightAtomLists=[target_atm1, target_atm2])\n",
"\n",
"df = pd.read_csv(f\"{root_dir}/data/small_family/experiment/big_datas.csv\", sep=',')\n",
"sub_df = pd.read_csv(f\"{root_dir}/data/small_family/experiment/reactions.csv\")\n",
"substrates = sub_df[\"substrates\"].values\n",
"\n",
"bad_molecular = 0\n",
"for sdf_path in glob.glob(f\"{root_dir}/data/small_family/experiment/structure/af2/raw_ligand/*.sdf\"):\n",
"\n",
" id = os.path.basename(sdf_path).split(\".\")[0]\n",
" # print(sdf_path)\n",
" # if id != \"2163\":\n",
" # continue\n",
"\n",
" substrate_id = int(df[\"reaction\"].values[int(id)])\n",
" # print(substrate_id)\n",
" smile = substrates[substrate_id]\n",
"\n",
" try:\n",
" mol = next(iter(Chem.SDMolSupplier(sdf_path, sanitize=True)))\n",
" mol = Chem.RemoveHs(mol)\n",
" except Exception as e:\n",
" mol = next(iter(Chem.SDMolSupplier(sdf_path, sanitize=False)))\n",
" mol = Chem.RemoveHs(mol, sanitize=False)\n",
"\n",
" # mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))\n",
" smile = Chem.MolToSmiles(Chem.MolFromSmiles(smile))\n",
" smile_mol = Chem.MolFromSmiles(smile)\n",
" # print(mol_get_atomic_number(smile_mol))\n",
" try:\n",
" aligned_idx = alignment_number_system(mol, smile_mol)\n",
" except:\n",
" \n",
" try:\n",
" mol = AssignBondOrdersFromTemplate(smile_mol, mol)\n",
" except ValueError:\n",
" print(f\"{id}\")\n",
" bad_molecular += 1\n",
" continue\n",
" \n",
" try:\n",
" aligned_idx = alignment_number_system(mol, smile_mol)\n",
" except:\n",
" print(id)\n",
" bad_molecular += 1\n",
" continue\n",
" mol = assign_idx(mol, aligned_idx)\n",
" # print(mol_get_atomic_number(mol, atom_map=True))\n",
" if not check(mol, smile_mol):\n",
" print(id)\n",
" w = Chem.SDWriter(f\"{root_dir}/data/small_family/experiment/structure/af2/ligand/{id}.sdf\")\n",
" w.write(mol)\n",
" w.close()\n",
"# # break\n",
"print(bad_molecular)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "revae",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
| Unknown |
3D | antecede/EZSpecificity | Datasets/CPI/cpi.py | .py | 2,194 | 66 | import sys
import numpy as np
import torch
root_dir = "/work/yufeng/2022/enzyme_specificity"
sys.path.append(f"{root_dir}/src")
torch.multiprocessing.set_sharing_strategy('file_system')
import pandas as pd
import pytorch_lightning as pl
from torch_geometric.data import DataLoader
from rdkit import RDLogger
from Datasets.data_representer import get_representer
from Datasets.utils import read_datasets
RDLogger.DisableLog('rdApp.*')
class CPIDataset(pl.LightningDataModule):
    """Lightning data module serving compound-protein-interaction splits.

    Each split CSV (train/val/test, paths taken from ``config.data``) is
    read once at construction time; each dataloader featurizes its split
    with the project's ``get_representer`` and wraps it in a
    ``torch_geometric`` DataLoader.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.train_df = pd.read_csv(config.data.train_data_path, sep=',')
        self.val_df = pd.read_csv(config.data.val_data_path, sep=',')
        self.test_df = pd.read_csv(config.data.test_data_path, sep=',')
        self.batch_size = config.data.batch_size
        self.n_worker = config.num_cpus

    def _make_loader(self, df, shuffle):
        # Shared helper: featurize one split and wrap it in a DataLoader.
        # (The three dataloader methods previously triplicated this code.)
        data = get_representer(df=df, config=self.config)
        return DataLoader(data, batch_size=self.batch_size, shuffle=shuffle,
                          num_workers=self.n_worker)

    def train_dataloader(self):
        return self._make_loader(self.train_df, shuffle=True)

    def val_dataloader(self):
        return self._make_loader(self.val_df, shuffle=False)

    def test_dataloader(self):
        return self._make_loader(self.test_df, shuffle=False)
def load_config(path):
    """Load a YAML config file and return it as an attribute-accessible EasyDict.

    Args:
        path: filesystem path to the YAML configuration file.

    Returns:
        EasyDict mirroring the YAML document.
    """
    # Bug fix: yaml and EasyDict were previously imported only inside the
    # `if __name__ == "__main__"` guard, so calling load_config from an
    # importing module raised NameError. Import locally so the function is
    # self-contained.
    import yaml
    from easydict import EasyDict
    with open(path, 'r') as f:
        return EasyDict(yaml.safe_load(f))
if __name__ == "__main__":
    # Ad-hoc smoke test: build a dataset from hard-coded paths and pull one
    # batch. Paths are machine-specific.
    from easydict import EasyDict
    import yaml
    import warnings
    import warnings  # NOTE(review): duplicated import, harmless but redundant
    warnings.filterwarnings("ignore")
    df = pd.read_csv("/work/yufeng/2022/enzyme_specificity/data/data.csv", sep=',')
    config = load_config("/work/yufeng/2022/enzyme_specificity/src/Configs/specificity.yml")
    # NOTE(review): `Reaction` is not defined anywhere in this module — this
    # line raises NameError if executed. Presumably a dataset class (perhaps
    # CPIDataset or a Reaction dataset from Datasets.*) was intended; confirm
    # before running.
    reaction = Reaction(df=df, st=0, ed=len(df.index), config=config)
    dataloader = DataLoader(reaction, batch_size=2, shuffle=True, num_workers=20)
    for _ in dataloader:
        print(_)
        break
    # print(len(reaction))
    # print(reaction[0])
3D | antecede/EZSpecificity | Datasets/Downloads/brenda_crawler.py | .py | 5,590 | 137 | from zeep import Client
import hashlib
import os
from tqdm import tqdm
import pandas as pd
class BrendaCrawler:
    """Downloads ligand, reaction, and sequence tables from the BRENDA
    enzyme database through its SOAP web service.

    Each ``*_download`` method takes an iterable of EC numbers, queries
    BRENDA once per EC number, and writes one CSV per EC number into a
    subdirectory of ``{root_dir}/data/raw_data``.
    """

    def __init__(self):
        # if dataset is None:
        #     dataset = 'halogenase'
        self.root_dir = "/projects/bbhh/suyufeng/enzyme_specificity"
        # self.dataset = dataset
        self.wsdl = "https://www.brenda-enzymes.org/soap/brenda_zeep.wsdl"
        # SECURITY NOTE(review): a plaintext personal password is hardcoded
        # here (BRENDA authenticates with the SHA-256 of the password).
        # It should be loaded from an environment variable or config file
        # and rotated, since it is now committed to source control.
        self.password = hashlib.sha256("Suyufeng971024".encode("utf-8")).hexdigest()
        self.client = Client(self.wsdl)
        # for name in ['enzyme', 'reaction']:
        #     if not os.path.exists(f"{self.root_dir}/data/{self.dataset}/{name}"):
        #         os.system(f"mkdir {self.root_dir}/data/{self.dataset}/{name}")
        # self.reaction_download(self.client)
        # self.sequence_download(self.client)

    def ligand_preprocess_ans_string(self, strings):
        """Convert the SOAP getLigands response into a DataFrame with
        name / organism / chebiId columns."""
        # print(strings)
        data = {
            "name": [],
            "organism": [],
            "chebiId": []
        }
        for string in strings:
            data['name'].append(string['ligand'])
            data['organism'].append(string['organism'])
            data['chebiId'].append(string['ligandStructureId'])
        data = pd.DataFrame(data=data)
        return data

    def ligand_download(self, values, save_dir=None):
        """Download ligand tables for each EC number in *values*; one CSV
        (deduplicated by ligand name) per EC number in *save_dir*."""
        client = self.client
        if save_dir is None:
            save_dir = f"{self.root_dir}/data/raw_data/ligand"
        os.makedirs(f"{save_dir}", exist_ok=True)
        for line in tqdm(values):
            ecnumber = line
            try:
                parameters = ("yufengs2@illinois.edu", self.password, "ecNumber*{}".format(ecnumber), "role*", "ligand*", "organism*", "ligandStructureId*")
                resultString = client.service.getLigands(*parameters)
                ans = self.ligand_preprocess_ans_string(resultString)
                ans = ans.drop_duplicates(subset=['name']).reset_index(drop=True)
                ans.to_csv(f"{save_dir}/{ecnumber}.csv", sep=',', index=False)
            except Exception as e:
                # Best-effort crawl: any SOAP/parsing failure skips this EC
                # number silently. NOTE(review): consider at least logging e.
                continue

    def reaction_preprocess_ans_string(self, strings):
        """Convert the SOAP getSubstrate response into a DataFrame with
        reaction / organism / substrate / ligand_id columns."""
        data = {
            "reaction": [],
            "organism": [],
            "substrate": [],
            "ligand_id": []
        }
        for string in strings:
            data['substrate'].append(string['substrate'])
            data['organism'].append(string['organism'])
            data['ligand_id'].append(string['ligandStructureId'])
            data['reaction'].append(string['reactionPartners'])
        data = pd.DataFrame(data=data)
        return data

    def reaction_download(self, values, save_dir=None):
        """Download substrate/reaction tables for each EC number in *values*;
        one CSV per EC number in *save_dir*."""
        client = self.client
        if save_dir is None:
            save_dir = f"{self.root_dir}/data/raw_data/reaction"
        os.makedirs(f"{save_dir}", exist_ok=True)
        # lines = [line.strip() for line in fin]
        for line in tqdm(values):
            ecnumber = line
            try:
                # print("{}/data/reaction/{}.csv".format(self.root_dir, ecnumber))
                parameters = ("yufengs2@illinois.edu", self.password, "ecNumber*{}".format(ecnumber), "substrate*", "reactionPartners*", "organism*", "ligandStructureId*")
                resultString = client.service.getSubstrate(*parameters)
                ans = self.reaction_preprocess_ans_string(resultString)
                ans.to_csv(f"{save_dir}/{ecnumber}.csv", sep=',', index=False)
            except Exception as e:
                # Best-effort crawl: failures skip this EC number silently.
                continue
            # ans = ans.drop_duplicates(subset=['organism', 'substrate', 'reactionPartners']).reset_index(drop=True)
            # ans.to_csv("../../../data/proteins/{}.csv".format(ecnumber), sep=',', index=False)
            # break

    def sequence_preprocess_ans_string(self, strings):
        """Convert the SOAP getSequence response into a DataFrame with
        sequence / organism / uniprot columns."""
        data = {
            "sequence": [],
            "organism": [],
            "uniprot": []
        }
        for string in strings:
            data['sequence'].append(string['sequence'])
            data['organism'].append(string['organism'])
            data['uniprot'].append(string['firstAccessionCode'])
        data = pd.DataFrame(data=data)
        return data

    def sequence_download(self, values, save_dir=None):
        """Download protein-sequence tables for each EC number in *values*;
        one CSV per EC number in *save_dir*.

        NOTE(review): the ``break`` below looks like a debugging leftover —
        the loop stops entirely after processing EC 1.1.1.1, so later EC
        numbers in *values* are never downloaded. Confirm and remove.
        """
        client = self.client
        if save_dir is None:
            save_dir = f"{self.root_dir}/data/raw_data/enzyme"
        os.makedirs(f"{save_dir}", exist_ok=True)
        for line in tqdm(values):
            ecnumber = line.strip()
            # try:
            parameters = ("yufengs2@illinois.edu", self.password, "ecNumber*{}".format(ecnumber), "sequence*", "noOfAminoAcids*", "firstAccessionCode*", "source*", "id*", "organism*")
            resultString = client.service.getSequence(*parameters)
            ans = self.sequence_preprocess_ans_string(resultString)
            # ans = ans.drop_duplicates(subset=['organism']).reset_index(drop=True)
            ans.to_csv(f"{save_dir}/{ecnumber}.csv", sep=',', index=False)
            if ecnumber == '1.1.1.1':
                print(ans)
                break
            # except Exception as e:
            #     continue
if __name__ == '__main__':
    # Ad-hoc driver: download sequence tables for every EC number listed in
    # the project's ecnumbers.csv. Paths are machine-specific.
    root_dir = "/projects/bbhh/suyufeng/enzyme_specificity"
    import sys
    sys.path.append(f"{root_dir}/src")
    # NOTE(review): this re-imports the class defined above via its package
    # path — redundant when running this file directly, but harmless.
    from Datasets.Downloads.brenda_crawler import BrendaCrawler
    import pandas as pd
    data = pd.read_csv(f"{root_dir}/data/brenda/ecnumbers.csv", sep=',')
    t = BrendaCrawler()
    # t.reaction_download(data['ecnumbers'].values)
    # t.ligand_download(data['ecnumbers'].values)
    t.sequence_download(data['ecnumbers'].values)
3D | antecede/EZSpecificity | Datasets/Structure/protein_ligand.py | .py | 14,563 | 376 | import os
import numpy as np
from rdkit import Chem
from rdkit.Chem.rdchem import BondType, HybridizationType
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
import torch
import torch.nn.functional as F
from torch_scatter import scatter
# Pharmacophore families assigned by RDKit's BaseFeatures.fdef feature factory
# (see the commented-out factory code in parse_sdf_file_mol).
ATOM_FAMILIES = ['Acceptor', 'Donor', 'Aromatic', 'Hydrophobe', 'LumpedHydrophobe', 'NegIonizable', 'PosIonizable', 'ZnBinder']
# Family name -> column index for a per-atom family feature matrix.
ATOM_FAMILIES_ID = {s: i for i, s in enumerate(ATOM_FAMILIES)}
# Per-atom feature layout produced by get_ligand_atom_features: name -> width.
ATOM_FEATS = {'AtomicNumber': 1, 'Aromatic': 1, 'Degree': 6, 'NumHs': 6, 'Hybridization': len(HybridizationType.values)}
# RDKit bond type <-> integer code lookup tables (enumeration order of
# BondType.names; used by parse_sdf_file_text).
BOND_TYPES = {t: i for i, t in enumerate(BondType.names.values())}
BOND_NAMES = {i: t for i, t in enumerate(BondType.names.keys())}
class PDBProtein(object):
    """Minimal fixed-column PDB parser.

    Accepts either a path to a ``.pdb`` file or an in-memory PDB text
    block and extracts flat per-atom lists plus grouped per-residue
    records (backbone positions, mass-weighted centers). Only ``ATOM``
    records are parsed; ``HETATM`` lines are ignored and parsing stops at
    the first ``ENDMDL``, so multi-model files contribute only their
    first model.
    """

    # 3-letter residue name -> 1-letter amino-acid code ('UNK' -> 'X').
    AA_NAME_SYM = {
        'ALA': 'A', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G', 'HIS': 'H',
        'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P', 'GLN': 'Q',
        'ARG': 'R', 'SER': 'S', 'THR': 'T', 'VAL': 'V', 'TRP': 'W', 'TYR': 'Y',
        'UNK': 'X'
    }

    # 3-letter residue name -> integer class index (insertion order above).
    AA_NAME_NUMBER = {
        k: i for i, (k, _) in enumerate(AA_NAME_SYM.items())
    }

    # Atom names treated as protein backbone.
    BACKBONE_NAMES = ["CA", "C", "N", "O"]

    def __init__(self, data, mode='auto'):
        """Parse *data* immediately.

        Parameters
        ----------
        data : str
            Path to a PDB file, or the PDB text itself.
        mode : str
            'auto' treats *data* as a path when it ends in '.pdb';
            'path' forces path interpretation; any other value treats
            *data* as a raw PDB block.
        """
        super().__init__()
        self.fn = data
        if (data[-4:].lower() == '.pdb' and mode == 'auto') or mode == 'path':
            with open(data, 'r') as f:
                self.block = f.read()
        else:
            self.block = data
        self.ptable = Chem.GetPeriodicTable()
        # Molecule properties
        self.title = None
        # Atom properties (parallel lists, one entry per ATOM record)
        self.atoms = []
        self.element = []
        self.atomic_weight = []
        self.pos = []
        self.atom_name = []
        self.is_backbone = []
        self.atom_to_aa_type = []
        # Residue properties (parallel lists, one entry per residue)
        self.residues = []
        self.amino_acid = []
        self.center_of_mass = []
        self.pos_CA = []
        self.pos_C = []
        self.pos_N = []
        self.pos_O = []
        self._parse()

    def _enum_formatted_atom_lines(self):
        """Yield parsed ATOM records (and HEADER lines) from the block.

        Fields follow the fixed PDB column layout. The element symbol
        falls back to column 13 when columns 77-78 are blank.
        """
        for line in self.block.splitlines():
            if line[0:6].strip() == 'ATOM':
                element_symb = line[76:78].strip().capitalize()
                if len(element_symb) == 0:
                    element_symb = line[13:14]
                yield {
                    'line': line,
                    'type': 'ATOM',
                    'atom_id': int(line[6:11]),
                    'atom_name': line[12:16].strip(),
                    'res_name': line[17:20].strip(),
                    'chain': line[21:22].strip(),
                    'res_id': int(line[22:26]),
                    'res_insert_id': line[26:27].strip(),
                    'x': float(line[30:38]),
                    'y': float(line[38:46]),
                    'z': float(line[46:54]),
                    'occupancy': float(line[54:60]),
                    'segment': line[72:76].strip(),
                    'element_symb': element_symb,
                    'charge': line[78:80].strip(),
                }
            elif line[0:6].strip() == 'HEADER':
                yield {
                    'type': 'HEADER',
                    'value': line[10:].strip()
                }
            elif line[0:6].strip() == 'ENDMDL':
                break  # Some PDBs have more than 1 model.

    def _parse(self):
        """Populate atom- and residue-level attributes from the block."""
        # Process atoms
        residues_tmp = {}
        for atom in self._enum_formatted_atom_lines():
            if atom['type'] == 'HEADER':
                self.title = atom['value'].lower()
                continue
            self.atoms.append(atom)
            atomic_number = self.ptable.GetAtomicNumber(atom['element_symb'])
            next_ptr = len(self.element)
            self.element.append(atomic_number)
            self.atomic_weight.append(self.ptable.GetAtomicWeight(atomic_number))
            self.pos.append(np.array([atom['x'], atom['y'], atom['z']], dtype=np.float32))
            self.atom_name.append(atom['atom_name'])
            self.is_backbone.append(atom['atom_name'] in self.BACKBONE_NAMES)
            if atom['res_name'] not in self.AA_NAME_NUMBER:
                self.atom_to_aa_type.append(self.AA_NAME_NUMBER['UNK'])
            else:
                self.atom_to_aa_type.append(self.AA_NAME_NUMBER[atom['res_name']])
            # Residues are keyed by chain/segment/id/insertion-code so that
            # identically numbered residues in different chains stay distinct.
            chain_res_id = '%s_%s_%d_%s' % (atom['chain'], atom['segment'], atom['res_id'], atom['res_insert_id'])
            if chain_res_id not in residues_tmp:
                residues_tmp[chain_res_id] = {
                    'name': atom['res_name'],
                    'atoms': [next_ptr],
                    'chain': atom['chain'],
                    'segment': atom['segment'],
                }
            else:
                assert residues_tmp[chain_res_id]['name'] == atom['res_name']
                assert residues_tmp[chain_res_id]['chain'] == atom['chain']
                residues_tmp[chain_res_id]['atoms'].append(next_ptr)
        # Process residues: mass-weighted center + backbone atom positions.
        self.residues = [r for _, r in residues_tmp.items()]
        for residue in self.residues:
            sum_pos = np.zeros([3], dtype=np.float32)
            sum_mass = 0.0
            for atom_idx in residue['atoms']:
                sum_pos += self.pos[atom_idx] * self.atomic_weight[atom_idx]
                sum_mass += self.atomic_weight[atom_idx]
                if self.atom_name[atom_idx] in self.BACKBONE_NAMES:
                    residue['pos_%s' % self.atom_name[atom_idx]] = self.pos[atom_idx]
            residue['center_of_mass'] = sum_pos / sum_mass
        # Process backbone atoms of residues
        for residue in self.residues:
            if residue['name'] not in self.AA_NAME_NUMBER:
                self.amino_acid.append(self.AA_NAME_NUMBER['UNK'])
            else:
                self.amino_acid.append(self.AA_NAME_NUMBER[residue['name']])
            self.center_of_mass.append(residue['center_of_mass'])
            for name in self.BACKBONE_NAMES:
                pos_key = 'pos_%s' % name  # pos_CA, pos_C, pos_N, pos_O
                if pos_key in residue:
                    getattr(self, pos_key).append(residue[pos_key])
                else:
                    # A residue missing this backbone atom falls back to
                    # its center of mass.
                    getattr(self, pos_key).append(residue['center_of_mass'])

    def to_dict_atom(self):
        """Return atom-level data as a dict of arrays/lists."""
        return {
            'element': np.array(self.element, dtype=int),
            'molecule_name': self.title,
            'pos': np.array(self.pos, dtype=np.float32),
            'is_backbone': np.array(self.is_backbone, dtype=bool),
            'atom_name': self.atom_name,
            'atom_to_aa_type': np.array(self.atom_to_aa_type, dtype=int)
        }

    def to_dict_residue(self):
        """Return residue-level data as a dict of arrays."""
        return {
            'amino_acid': np.array(self.amino_acid, dtype=int),
            'center_of_mass': np.array(self.center_of_mass, dtype=np.float32),
            'pos_CA': np.array(self.pos_CA, dtype=np.float32),
            'pos_C': np.array(self.pos_C, dtype=np.float32),
            'pos_N': np.array(self.pos_N, dtype=np.float32),
            'pos_O': np.array(self.pos_O, dtype=np.float32),
        }

    def query_residues_radius(self, center, radius, criterion='center_of_mass'):
        """Return residues whose *criterion* point lies within *radius*
        (Euclidean distance) of *center*."""
        center = np.array(center).reshape(3)
        selected = []
        for residue in self.residues:
            # (Removed a leftover debug print of every residue/distance.)
            distance = np.linalg.norm(residue[criterion] - center, ord=2)
            if distance < radius:
                selected.append(residue)
        return selected

    def query_residues_ligand(self, ligand, radius, criterion='center_of_mass'):
        """Return residues whose *criterion* point lies within *radius* of
        any ligand atom position (``ligand['pos']``), without duplicates."""
        selected = []
        sel_idx = set()
        # The time-complexity is O(mn).
        for center in ligand['pos']:
            for i, residue in enumerate(self.residues):
                distance = np.linalg.norm(residue[criterion] - center, ord=2)
                if distance < radius and i not in sel_idx:
                    selected.append(residue)
                    sel_idx.add(i)
        return selected

    def residues_to_pdb_block(self, residues, name='POCKET'):
        """Serialize *residues* back into a PDB block using the original
        ATOM lines they were parsed from."""
        block = "HEADER    %s\n" % name
        block += "COMPND    %s\n" % name
        for residue in residues:
            for atom_idx in residue['atoms']:
                block += self.atoms[atom_idx]['line'] + "\n"
        block += "END\n"
        return block
def get_zero_protein_feature():
    """Return an empty protein feature dict (placeholder for a missing
    protein); keys and dtypes mirror PDBProtein.to_dict_atom with zero
    atoms, except 'molecule_name' which is the literal string 'None'."""
    feature = dict(
        element=np.array([], dtype=int),
        molecule_name='None',
        pos=np.array([], dtype=np.float32),
        is_backbone=np.array([], dtype=bool),
        atom_name=[],
        atom_to_aa_type=np.array([], dtype=int),
    )
    return feature
def get_ligand_atom_features(rdmol):
    """Build a per-atom integer feature matrix for an RDKit molecule.

    Columns (order must stay in sync with ATOM_FEATS): atomic number,
    aromatic flag, degree, number of bonded hydrogens, hybridization code.

    Parameters
    ----------
    rdmol : rdkit.Chem.Mol
        Molecule to featurize. Hydrogen counts are derived from explicit
        H atoms, so molecules loaded with removeHs=True get num_hs == 0.

    Returns
    -------
    np.ndarray of shape (num_atoms, 5), dtype int.
    """
    num_atoms = rdmol.GetNumAtoms()
    # Hybridization enum value -> dense integer code. Loop-invariant, so
    # build it once (the original rebuilt this dict for every atom).
    hybrid_types = {t: i for i, t in enumerate(HybridizationType.names.values())}
    atomic_number = []
    aromatic = []
    hybrid = []
    degree = []
    for atom_idx in range(num_atoms):
        atom = rdmol.GetAtomWithIdx(atom_idx)
        atomic_number.append(atom.GetAtomicNum())
        aromatic.append(1 if atom.GetIsAromatic() else 0)
        hybrid.append(hybrid_types[atom.GetHybridization()])
        degree.append(atom.GetDegree())
    node_type = torch.tensor(atomic_number, dtype=torch.long)
    # Undirected edge list: each bond appears in both directions.
    row, col = [], []
    for bond in rdmol.GetBonds():
        start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        row += [start, end]
        col += [end, start]
    row = torch.tensor(row, dtype=torch.long)
    col = torch.tensor(col, dtype=torch.long)
    # Count hydrogen neighbours per atom by scatter-summing H indicators
    # over the edges (hs[row] is 1 where the source atom is hydrogen).
    hs = (node_type == 1).to(torch.float)
    num_hs = scatter(hs[row], col, dim_size=num_atoms).numpy()
    # need to change ATOM_FEATS accordingly
    feat_mat = np.array([atomic_number, aromatic, degree, num_hs, hybrid], dtype=int).transpose()
    return feat_mat
# used for fixing some errors in sdf file
def parse_sdf_file_text(path):
    """Parse a V2000 SDF file by hand (tolerates files RDKit rejects).

    Returns a dict with keys 'element', 'pos', 'bond_index', 'bond_type'
    and 'center_of_mass'. Bond codes follow the module-level BOND_TYPES
    enumeration; edges are stored in both directions and sorted
    lexicographically by (source, target).
    """
    with open(path, 'r') as f:
        lines = f.read().splitlines()
    # Counts line of the V2000 block: cols 0-2 = atoms, cols 3-5 = bonds.
    num_atoms = int(lines[3][0:3])
    num_bonds = int(lines[3][3:6])
    ptable = Chem.GetPeriodicTable()
    element, pos = [], []
    accum_pos = np.array([0.0, 0.0, 0.0], dtype=np.float32)
    accum_mass = 0.0
    for raw in lines[4:4 + num_atoms]:
        fields = raw.split()
        x, y, z = float(fields[0]), float(fields[1]), float(fields[2])
        atomic_number = ptable.GetAtomicNumber(fields[3].capitalize())
        element.append(atomic_number)
        pos.append([x, y, z])
        weight = ptable.GetAtomicWeight(atomic_number)
        accum_pos += np.array([x, y, z]) * weight
        accum_mass += weight
    center_of_mass = np.array(accum_pos / accum_mass, dtype=np.float32)
    element = np.array(element, dtype=int)
    pos = np.array(pos, dtype=np.float32)
    # SDF bond order (1/2/3/4/8) -> module bond-code enumeration.
    bond_codes = {t: i for i, t in enumerate(BondType.names.values())}
    sdf_to_code = {
        1: bond_codes[BondType.SINGLE],
        2: bond_codes[BondType.DOUBLE],
        3: bond_codes[BondType.TRIPLE],
        4: bond_codes[BondType.AROMATIC],
        8: bond_codes[BondType.UNSPECIFIED]
    }
    row, col, edge_type = [], [], []
    for raw in lines[4 + num_atoms:4 + num_atoms + num_bonds]:
        start = int(raw[0:3]) - 1
        end = int(raw[3:6]) - 1
        code = sdf_to_code[int(raw[6:9])]
        row += [start, end]
        col += [end, start]
        edge_type += [code, code]
    edge_index = np.array([row, col], dtype=int)
    edge_type = np.array(edge_type, dtype=int)
    perm = (edge_index[0] * num_atoms + edge_index[1]).argsort()
    return {
        'element': element,
        'pos': pos,
        'bond_index': edge_index[:, perm],
        'bond_type': edge_type[perm],
        'center_of_mass': center_of_mass
    }
# used for preparing the dataset
def parse_sdf_file_mol(path, mol=None, heavy_only=True):
    """Extract atom, bond and per-atom feature arrays from an SDF molecule.

    Parameters
    ----------
    path : str
        SDF file; read only when *mol* is None. Loaded with
        sanitize=False, so chemically invalid molecules are accepted
        as-is.
    mol : rdkit.Chem.Mol, optional
        Already-loaded molecule; bypasses reading *path*.
    heavy_only : bool
        Forwarded to SDMolSupplier as ``removeHs`` (True drops explicit
        hydrogens, keeping heavy atoms only).

    Returns
    -------
    dict
        Keys: 'element', 'pos', 'bond_index', 'bond_type',
        'center_of_mass', 'atom_feature' (see get_ligand_atom_features)
        and 'index' (per-atom SDF atom map numbers).
    """
    if mol is None:
        mol = next(iter(Chem.SDMolSupplier(path, removeHs=heavy_only, sanitize=False)))
    feat_mat = get_ligand_atom_features(mol)
    # fdefName = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
    # factory = ChemicalFeatures.BuildFeatureFactory(fdefName)
    # rdmol = next(iter(Chem.SDMolSupplier(path, removeHs=heavy_only)))
    # rd_num_atoms = rdmol.GetNumAtoms()
    # feat_mat = np.zeros([rd_num_atoms, len(ATOM_FAMILIES)], dtype=int)
    # for feat in factory.GetFeaturesForMol(rdmol):
    # feat_mat[feat.GetAtomIds(), ATOM_FAMILIES_ID[feat.GetFamily()]] = 1
    ptable = Chem.GetPeriodicTable()
    num_atoms = mol.GetNumAtoms()
    num_bonds = mol.GetNumBonds()
    pos = mol.GetConformer().GetPositions()
    element = []
    indexs = []
    accum_pos = np.array([0.0, 0.0, 0.0], dtype=np.float32)
    accum_mass = 0.0
    # Mass-weighted average of atom positions -> center of mass.
    for atom_idx in range(num_atoms):
        atom = mol.GetAtomWithIdx(atom_idx)
        atomic_number = atom.GetAtomicNum()
        element.append(atomic_number)
        indexs.append(atom.GetAtomMapNum())
        x, y, z = pos[atom_idx]
        atomic_weight = ptable.GetAtomicWeight(atomic_number)
        accum_pos += np.array([x, y, z]) * atomic_weight
        accum_mass += atomic_weight
    center_of_mass = np.array(accum_pos / accum_mass, dtype=np.float32)
    element = np.array(element, dtype=int)
    pos = np.array(pos, dtype=np.float32)
    indexs = np.array(indexs, dtype=int)
    row, col, edge_type = [], [], []
    # NOTE(review): this local bond-code table (1..5) deliberately shadows
    # and differs from the module-level BOND_TYPES enumeration used by
    # parse_sdf_file_text; bond types outside these five raise KeyError.
    # Confirm downstream consumers expect this encoding.
    BOND_TYPES = {}
    BOND_TYPES[BondType.SINGLE] = 1
    BOND_TYPES[BondType.DOUBLE] = 2
    BOND_TYPES[BondType.TRIPLE] = 3
    BOND_TYPES[BondType.AROMATIC] = 4
    BOND_TYPES[BondType.UNSPECIFIED] = 5
    for bond in mol.GetBonds():
        start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        row += [start, end]
        col += [end, start]
        edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
    edge_index = np.array([row, col], dtype=int)
    edge_type = np.array(edge_type, dtype=int)
    # Sort edges lexicographically by (source, target) for a canonical order.
    perm = (edge_index[0] * num_atoms + edge_index[1]).argsort()
    edge_index = edge_index[:, perm]
    edge_type = edge_type[perm]
    data = {
        'element': element,
        'pos': pos,
        'bond_index': edge_index,
        'bond_type': edge_type,
        'center_of_mass': center_of_mass,
        'atom_feature': feat_mat,
        'index': indexs
    }
    return data
3D | antecede/EZSpecificity | Datasets/Structure/__init__.py | .py | 106 | 2 | from Datasets.Structure.structure import StructureDataset
from Datasets.Structure.collator import collator | Python |
3D | antecede/EZSpecificity | Datasets/Structure/utils.py | .py | 1,934 | 62 | import copy
import torch
import numpy as np
from torch_geometric.data import Data, DataLoader, Batch
# Attribute names for which PyG's collate should create per-graph batch
# vectors (NOTE(review): unused within this file — presumably consumed by a
# DataLoader elsewhere; confirm before removing).
FOLLOW_BATCH = ['protein_element', 'ligand_context_element', 'pos_real', 'pos_fake']
class StructureComplexData(Data):
    """torch_geometric ``Data`` subclass holding a protein-ligand complex.

    Protein attributes are stored under ``protein_*`` keys and ligand
    attributes under ``ligand_*`` keys (see from_protein_ligand_dicts).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @staticmethod
    def from_protein_ligand_dicts(protein_dict=None, ligand_dict=None, **kwargs):
        """Build an instance from separate protein/ligand dicts, prefixing
        each key with 'protein_' / 'ligand_' respectively; extra kwargs
        are forwarded to the constructor."""
        instance = StructureComplexData(**kwargs)
        if protein_dict is not None:
            for key, item in protein_dict.items():
                instance['protein_' + key] = item
        if ligand_dict is not None:
            for key, item in ligand_dict.items():
                instance['ligand_' + key] = item
        # instance['ligand_nbh_list'] = {i.item():[j.item() for k, j in enumerate(instance.ligand_bond_index[1])
        #                                if instance.ligand_bond_index[0, k].item() == i] for i in instance.ligand_bond_index[0]}
        return instance
    def __inc__(self, key, value, *args, **kwargs):
        """Per-key index offset applied by PyG when batching graphs.

        NOTE(review): 'complex_edge_index' is offset by
        ``mask_ligand.size(0)`` — presumably one mask entry per node of the
        combined complex graph; confirm against the dataset builder.
        """
        if key == 'complex_edge_index':
            return self['mask_ligand'].size(0)
        else:
            return super().__inc__(key, value, *args, **kwargs)
# class StructureComplexDataLoader(DataLoader):
# def __init__(
# self,
# dataset,
# batch_size=1,
# shuffle=False,
# follow_batch=['ligand_element', 'protein_element'],
# **kwargs
# ):
# super().__init__(dataset, batch_size=batch_size, shuffle=shuffle, follow_batch=follow_batch, **kwargs)
def batch_from_data_list(data_list):
    """Collate a list of Data objects into a single PyG Batch, creating
    per-graph batch vectors for 'ligand_element' and 'protein_element'."""
    return Batch.from_data_list(data_list, follow_batch=['ligand_element', 'protein_element'])
def torchify_dict(data):
    """Return a shallow copy of *data* with every numpy array converted to
    a torch tensor via ``torch.from_numpy`` (which shares the underlying
    memory); all other values are passed through unchanged."""
    return {
        key: torch.from_numpy(value) if isinstance(value, np.ndarray) else value
        for key, value in data.items()
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.