repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
mff | mff-master/mff/kernels/base.py | import os
from abc import ABCMeta, abstractmethod
from pathlib import Path
path = Path(os.path.abspath(__file__))
Mffpath = path.parent.parent / "cache/"
class Kernel(metaclass=ABCMeta):
    """Abstract base class for mff kernels.

    Concrete kernels (e.g. the two- and three-body, single- and
    many-species kernels) subclass this and pass their identifying
    name up via ``super().__init__(kernel_name)``.
    """

    @abstractmethod
    def __init__(self, kernel_name, *args, **kwargs):
        # Cooperative __init__: forward extra args up the MRO, then record
        # the string identifying the concrete kernel implementation.
        super().__init__(*args, **kwargs)
        self.kernel_name = kernel_name
| 345 | 23.714286 | 53 | py |
mff | mff-master/mff/kernels/twobodykernel.py | # -*- coding: utf-8 -*-
import logging
import os.path
import pickle
from abc import ABCMeta, abstractmethod
import numpy as np
from mff.kernels.base import Kernel, Mffpath
logger = logging.getLogger(__name__)
def dummy_calc_ff(data):
    """ Function used when multiprocessing.

    Computes a chunk of 2-body force-force kernel values using the
    precompiled theano kernel function cached on disk.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values: the chunk of
            configuration pairs, the three kernel hyperparameters and
            the kernel type ("single" or "multi")

    Returns:
        result (array): the computed kernel values, shape (len(array), 3, 3)

    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    # Resolve the cached kernel pickle up front; failing fast on an unknown
    # type avoids an obscure NameError on 'fun' further down.
    pickle_names = {"single": "k2_ff_s.pickle", "multi": "k2_ff_m.pickle"}
    if kertype not in pickle_names:
        raise ValueError(
            "kernel type must be 'single' or 'multi', got %r" % (kertype,))
    with open(Mffpath / pickle_names[kertype], 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3, 3))
    # The two central-atom positions are always the origin; the compiled
    # kernel operates on the relative neighbour positions in each pair.
    for i in range(len(array)):
        result[i] = fun(np.zeros(3), np.zeros(3), array[i][0],
                        array[i][1], theta0, theta1, theta2)
    return result
def dummy_calc_ee(data):
    """ Function used when multiprocessing.

    Computes a chunk of 2-body energy-energy kernel values using the
    precompiled theano kernel function cached on disk.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values: the chunk of
            configuration pairs, the three kernel hyperparameters, the
            kernel type ("single" or "multi") and the mapping flag

    Returns:
        result (array): the computed kernel values, shape (len(array),)

    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype, mapping = data
    # Resolve the cached kernel pickle up front; failing fast on an unknown
    # type avoids an obscure NameError on 'fun' further down.
    pickle_names = {"single": "k2_ee_s.pickle", "multi": "k2_ee_m.pickle"}
    if kertype not in pickle_names:
        raise ValueError(
            "kernel type must be 'single' or 'multi', got %r" % (kertype,))
    with open(Mffpath / pickle_names[kertype], 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros(len(array))
    if not mapping:
        # Global energy-energy: sum the local kernel over every pair of
        # local environments, with a 0.25 prefactor.
        for i in range(len(array)):
            for conf1 in array[i][0]:
                for conf2 in array[i][1]:
                    result[i] += 0.25*fun(np.zeros(3), np.zeros(3),
                                          conf1, conf2, theta0, theta1, theta2)
    else:
        # Mapping mode: the first element of each pair is a single local
        # environment, so only the second one is iterated (0.5 prefactor).
        for i in range(len(array)):
            for conf2 in array[i][1]:
                result[i] += 0.5*fun(np.zeros(3), np.zeros(3),
                                     array[i][0], conf2, theta0, theta1, theta2)
    return result
def dummy_calc_ef(data):
    """ Function used when multiprocessing.

    Computes a chunk of 2-body energy-force kernel values using the
    precompiled theano kernel function cached on disk.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values: the chunk of
            configuration pairs, the three kernel hyperparameters, the
            kernel type ("single" or "multi") and the mapping flag

    Returns:
        result (array): the computed kernel values, shape (len(array), 3)

    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype, mapping = data
    # Resolve the cached kernel pickle up front; failing fast on an unknown
    # type avoids an obscure NameError on 'fun' further down.
    pickle_names = {"single": "k2_ef_s.pickle", "multi": "k2_ef_m.pickle"}
    if kertype not in pickle_names:
        raise ValueError(
            "kernel type must be 'single' or 'multi', got %r" % (kertype,))
    with open(Mffpath / pickle_names[kertype], 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3))
    if not mapping:
        # Global energy vs force: sum the (negated) kernel gradient over the
        # local environments of the first configuration, with a 0.5 prefactor.
        for i in range(len(array)):
            conf2 = np.array(array[i][1], dtype='float')
            for conf1 in array[i][0]:
                conf1 = np.array(conf1, dtype='float')
                result[i] += -0.5*fun(np.zeros(3), np.zeros(3),
                                      conf1, conf2, theta0, theta1, theta2)
    else:
        # Mapping mode: single local environment on both sides, no prefactor.
        for i in range(len(array)):
            conf2 = np.array(array[i][1], dtype='float')
            conf1 = np.array(array[i][0], dtype='float')
            result[i] += -fun(np.zeros(3), np.zeros(3), conf1,
                              conf2, theta0, theta1, theta2)
    return result
class BaseTwoBody(Kernel, metaclass=ABCMeta):
    """ Two body kernel class
    Handles the functions common to the single-species and
    multi-species two-body kernels.

    Args:
        kernel_name (str): To choose between single- and two-species kernel
        theta[0] (float) : lengthscale of the kernel
        theta[1] (float) : decay rate of the cutoff function
        theta[2] (float) : cutoff radius
        bounds (list) : bounds of the kernel function.

    Attributes:
        k2_ee (object): Energy-energy kernel function
        k2_ef (object): Energy-force kernel function
        k2_ff (object): Force-force kernel function
    """

    @abstractmethod
    def __init__(self, kernel_name, theta, bounds):
        super().__init__(kernel_name)
        self.theta = theta
        self.bounds = bounds
        # Compile (or load from the on-disk cache) the theano kernel
        # functions once at construction time.
        self.k2_ee, self.k2_ef, self.k2_ff = self.compile_theano()

    def calc(self, X1, X2, ncores=1):
        """
        Calculate the force-force kernel between two sets of configurations.

        Args:
            X1 (list): list of N1 Mx5 arrays containing xyz coordinates and atomic species
            X2 (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores; if > 1 the pairs are chunked
                and evaluated with multiprocessing

        Returns:
            K (matrix): N1*3 x N2*3 matrix of the matrix-valued kernels
        """
        ker = np.zeros((len(X1) * 3, len(X2) * 3))
        if ncores > 1:
            # Build the full cartesian product of configuration pairs,
            # then split it into ncores contiguous chunks.
            confs = []
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            # Pickled theano graphs are deep; raise the recursion limit so
            # they can be serialised to the worker processes.
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 2-body force-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ff, clist)
            pool.close()
            pool.join()
            # Each pair yields a 3x3 block; scatter them back into the
            # big kernel matrix in row-major pair order.
            result = np.concatenate(result).reshape((n, 3, 3))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i * 3: i * 3 + 3, 3 * j:3 * j +
                        3] = result[(j + i * len(X2))]
        else:
            # Serial path: call the compiled kernel pair by pair.
            for i, conf1 in enumerate(X1):
                for j, conf2 in enumerate(X2):
                    ker[i * 3:i * 3 + 3, 3 * j:3 * j + 3] += self.k2_ff(
                        conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker

    def calc_ef(self, X_glob, X, ncores=1, mapping=False):
        """
        Calculate the energy-force kernel between two sets of configurations.

        Args:
            X_glob (list): list of N1 global environments (lists of Mx5 arrays),
                or of single local environments when ``mapping`` is True
            X (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores for multiprocessing
            mapping (bool): if True, treat each element of X_glob as a single
                local environment (no 0.5 prefactor, no inner sum)

        Returns:
            K (matrix): N1 x N2*3 matrix of the vector-valued kernels
        """
        ker = np.zeros((len(X_glob), len(X) * 3))
        if ncores > 1:
            confs = []
            for x1 in X_glob:
                for x2 in X:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 2-body energy-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type, mapping] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ef, clist)
            pool.close()
            pool.join()
            # Each pair yields a length-3 row; scatter back in pair order.
            result = np.vstack(np.asarray(result))
            for i in range(len(X_glob)):
                for j in range(len(X)):
                    ker[i, 3 * j:3 * j + 3] = result[(j + i * len(X))]
        else:
            if not mapping:
                # Sum over local environments of each global configuration,
                # with the 0.5 energy prefactor.
                for i, x1 in enumerate(X_glob):
                    for j, conf2 in enumerate(X):
                        for conf1 in x1:
                            ker[i, 3 * j:3 * j + 3] += 0.5*self.k2_ef(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            else:
                # Mapping mode: one local environment per entry, no prefactor.
                for i, conf1 in enumerate(X_glob):
                    for j, conf2 in enumerate(X):
                        ker[i, 3 * j:3 * j + 3] += self.k2_ef(
                            conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker

    def calc_ee(self, X1, X2, ncores=1, mapping=False):
        """
        Calculate the energy-energy kernel between two global environments.

        Args:
            X1 (list): list of N1 global environments (lists of Mx5 arrays),
                or single local environments when ``mapping`` is True
            X2 (list): list of N2 global environments (lists of Mx5 arrays)
            ncores (int): number of CPU cores for multiprocessing
            mapping (bool): if True, treat each element of X1 as a single
                local environment (0.5 prefactor instead of 0.25)

        Returns:
            K (matrix): N1 x N2 matrix of the scalar-valued kernels
        """
        if ncores > 1:  # Used for multiprocessing
            confs = []
            # Build a list of all input pairs which matrix needs to be computed
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 2-body energy-energy kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type, mapping] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ee, clist)
            pool.close()
            pool.join()
            result = np.concatenate(result).ravel()
            ker = np.zeros((len(X1), len(X2)))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i, j] = result[j + i*len(X2)]
        else:
            if not mapping:
                # Double sum over the local environments of both globals,
                # with the 0.25 prefactor.
                ker = np.zeros((len(X1), len(X2)))
                for i, x1 in enumerate(X1):
                    for j, x2 in enumerate(X2):
                        for conf1 in x1:
                            for conf2 in x2:
                                ker[i, j] += 0.25*self.k2_ee(conf1, conf2,
                                                             self.theta[0], self.theta[1], self.theta[2])
            else:
                # Mapping mode: X1 holds single local environments.
                ker = np.zeros((len(X1), len(X2)))
                for i, conf1 in enumerate(X1):
                    for j, x2 in enumerate(X2):
                        for conf2 in x2:
                            ker[i, j] += 0.5*self.k2_ee(conf1, conf2,
                                                        self.theta[0], self.theta[1], self.theta[2])
        return ker

    def calc_gram(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the force-force gram matrix for a set of configurations X.

        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix

        Returns:
            gram (matrix): N*3 x N*3 gram matrix of the matrix-valued kernels
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Used for multiprocessing
                confs = []
                # Build a list of all input pairs which matrix needs to be computed.
                # Only the lower triangle (j <= i) is computed; symmetry fills the rest.
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.asarray([X[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the 2-body force-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ff, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).reshape((n, 3, 3))
                off_diag = np.zeros((len(X) * 3, len(X) * 3))
                diag = np.zeros((len(X) * 3, len(X) * 3))
                # Triangular indexing: pair (i, j<=i) lives at j + i*(i+1)//2.
                for i in np.arange(len(X)):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i +
                         3] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 *
                                 j + 3] = result[j + i * (i + 1) // 2]
            else:
                # NOTE(review): this branch indexes X.shape[0] — it assumes X is a
                # numpy array rather than a plain list; confirm against callers.
                diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                off_diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                for i in np.arange(X.shape[0]):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i + 3] = \
                        self.k2_ff(X[i], X[i], self.theta[0],
                                   self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 * j + 3] = \
                            self.k2_ff(X[i], X[j], self.theta[0],
                                       self.theta[1], self.theta[2])
            gram = diag + off_diag + off_diag.T  # The gram matrix is symmetric
            return gram

    def calc_gram_e(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the energy-energy gram matrix for a set of configurations X.

        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix

        Returns:
            gram (matrix): N x N gram matrix of the scalar-valued kernels
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Used for multiprocessing
                confs = []
                # Build a list of all input pairs which matrix needs to be computed
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.array([list(X[i]), list(X[j])])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the 2-body energy-energy gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type, False] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ee, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                off_diag = np.zeros((len(X), len(X)))
                diag = np.zeros((len(X), len(X)))
                # Triangular indexing: pair (i, j<=i) lives at j + i*(i+1)//2.
                for i in np.arange(len(X)):
                    diag[i, i] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[i, j] = result[j + i * (i + 1) // 2]
            else:
                # NOTE(review): this branch indexes X.shape[0] — it assumes X is a
                # numpy array rather than a plain list; confirm against callers.
                diag = np.zeros((X.shape[0], X.shape[0]))
                off_diag = np.zeros((X.shape[0], X.shape[0]))
                for i in np.arange(X.shape[0]):
                    for k, conf1 in enumerate(X[i]):
                        diag[i, i] += 0.25*self.k2_ee(conf1, conf1,
                                                      self.theta[0], self.theta[1], self.theta[2])
                        for conf2 in X[i][:k]:
                            # *2 here to speed up the loop
                            diag[i, i] += 0.25*2.0*self.k2_ee(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        for conf1 in X[i]:
                            for conf2 in X[j]:
                                off_diag[i, j] += 0.25*self.k2_ee(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            gram = diag + off_diag + off_diag.T  # Gram matrix is symmetric
            return gram

    def calc_gram_ef(self, X, X_glob, ncores=1, eval_gradient=False):
        """
        Calculate the energy-force gram matrix for a set of configurations X.
        This returns a non-symmetric matrix which is equal to the transpose of
        the force-energy gram matrix.

        Args:
            X (list): list of N1 M1x5 arrays containing xyz coordinates and atomic species
            X_glob (list): list of N2 M2x5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix

        Returns:
            gram (matrix): N2 x N1*3 gram matrix of the vector-valued kernels
        """
        # NOTE(review): .shape is used on both inputs — assumes numpy arrays.
        gram = np.zeros((X_glob.shape[0], X.shape[0] * 3))
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Multiprocessing
                confs = []
                for i in np.arange(len(X_glob)):
                    for j in np.arange(len(X)):
                        thislist = np.asarray([X_glob[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the 2-body energy-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type, False] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ef, clist)
                pool.close()
                pool.join()
                result = np.vstack(np.asarray(result))
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        gram[i, 3 * j:3 * j + 3] = result[(j + i * X.shape[0])]
            else:
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        for k in X_glob[i]:
                            gram[i, 3 * j:3 * j + 3] += 0.5*self.k2_ef(
                                k, X[j], self.theta[0], self.theta[1], self.theta[2])
            # Cache the last computed matrix on the instance before returning.
            self.gram_ef = gram
            return gram

    @staticmethod
    @abstractmethod
    def compile_theano():
        # Subclasses return the compiled (k2_ee, k2_ef, k2_ff) functions.
        return None, None, None
class TwoBodySingleSpeciesKernel(BaseTwoBody):
    """Two body single species kernel.

    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """

    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='TwoBodySingleSpecies', theta=theta, bounds=bounds)
        # Tag used by the multiprocessing helpers to select the "_s" pickles.
        self.type = "single"

    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for global energy and force learning

        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5 here called r1 and r2.

        Returns:
            k2_ee (func): energy-energy kernel
            k2_ef (func): energy-force kernel
            k2_ff (func): force-force kernel
        """
        # Compile only if the on-disk cache of pickled functions is missing.
        if not (os.path.exists(Mffpath / 'k2_ee_s.pickle') and
                os.path.exists(Mffpath / 'k2_ef_s.pickle') and os.path.exists(Mffpath / 'k2_ff_s.pickle')):
            print("Building Kernels")
            # theano is imported lazily so it is only required when compiling.
            import theano.tensor as T
            from theano import function, scan
            logger.info(
                "Started compilation of theano two body single species kernels")
            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # lengthscale hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')
            # positions of neighbours without chemical species (3D space assumed)
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]
            # distances of atoms wrt to the central one and wrt each other in 1 and 2
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            # squared exponential of the above distance matrices
            se_jm = T.exp(-(r1j[:, None] - r2m[None, :]) ** 2 / (2 * sig ** 2))
            # cosine cutoff times a hard step at rc, broadcast over all (j, m) pairs
            cut_jm = 0.5*(1+T.cos(np.pi*r1j[:, None]/rc))*0.5*(1+T.cos(np.pi*r2m[None, :]/rc))*(
                (T.sgn(rc-r1j[:, None]) + 1) / 2)*((T.sgn(rc-r2m[None, :]) + 1) / 2)
            # apply the cutoff function to the squared exponential partial kernels
            se_jm = se_jm*cut_jm
            k = T.sum(se_jm)
            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------
            # energy energy kernel
            k_ee_fun = function([r1, r2, rho1, rho2, sig, theta, rc], k,
                                allow_input_downcast=False, on_unused_input='warn')
            # energy force kernel - Used to predict energies from forces
            k_ef = T.grad(k, r2)
            k_ef_fun = function([r1, r2, rho1, rho2, sig, theta, rc], k_ef,
                                allow_input_downcast=False, on_unused_input='warn')
            # force force kernel - it uses only local atom pairs to avoid useless computation
            k_ff = T.grad(k, r1)
            # Hessian wrt both central positions, built row by row with scan.
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function([r1, r2, rho1, rho2, sig, theta, rc], k_ff_der,
                                allow_input_downcast=False, on_unused_input='warn')
            # Save the function that we want to use for multiprocessing.
            # This is necessary because theano does not want to access the
            # automatically stored compiled object from different processes.
            with open(Mffpath / 'k2_ee_s.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k2_ef_s.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k2_ff_s.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
        else:
            print("Loading Kernels")
            with open(Mffpath / "k2_ee_s.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k2_ef_s.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k2_ff_s.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)

        # --------------------------------------------------
        # WRAPPERS (we don't want to plug the position of the central element every time)
        # --------------------------------------------------
        def k2_ee(conf1, conf2, sig, theta, rc):
            """
            Two body kernel for global energy-energy correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (float): scalar valued energy-energy 2-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k2_ef(conf1, conf2, sig, theta, rc):
            """
            Two body kernel for global energy-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (array): 3x1 energy-force 2-body kernel
            """
            # Sign flip: force is minus the gradient of the energy.
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k2_ff(conf1, conf2, sig, theta, rc):
            """
            Two body kernel for force-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (matrix): 3x3 force-force 2-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        logger.info(
            "Ended compilation of theano two body single species kernels")
        return k2_ee, k2_ef, k2_ff
class TwoBodyManySpeciesKernel(BaseTwoBody):
    """Two body many species kernel.

    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """

    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='TwoBodyManySpecies', theta=theta, bounds=bounds)
        # Tag used by the multiprocessing helpers to select the "_m" pickles.
        self.type = "multi"

    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for global energy and force learning

        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5 here called r1 and r2.

        Returns:
            k2_ee (func): energy-energy kernel
            k2_ef (func): energy-force kernel
            k2_ff (func): force-force kernel
        """
        # Compile only if the on-disk cache of pickled functions is missing.
        # NOTE: if stale pickles were produced by an older (buggy) graph,
        # they must be deleted for the fix below to take effect.
        if not (os.path.exists(Mffpath / 'k2_ee_m.pickle') and
                os.path.exists(Mffpath / 'k2_ef_m.pickle') and os.path.exists(Mffpath / 'k2_ff_m.pickle')):
            print("Building Kernels")
            # theano is imported lazily so it is only required when compiling.
            import theano.tensor as T
            from theano import function, scan
            logger.info("Started compilation of theano two body kernels")
            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # lengthscale hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')
            # positions of neighbours without chemical species (3D space assumed)
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]
            # species labels: column 3 is the central atom's, column 4 the neighbour's
            alpha_1 = rho1[:, 3]  # .flatten()
            alpha_2 = rho2[:, 3]  # .flatten()
            alpha_j = rho1[:, 4]  # .flatten()
            alpha_m = rho2[:, 4]  # .flatten()

            # numerical kronecker delta (sharply peaked gaussian on the labels)
            def delta_alpha2(a1j, a2m):
                d = T.exp(-(a1j - a2m) ** 2 / (2 * 1e-5 ** 2))
                return d

            # matrices determining whether couples of atoms have the same atomic number
            delta_alphas12 = delta_alpha2(alpha_1[:, None], alpha_2[None, :])
            delta_alphasjm = delta_alpha2(alpha_j[:, None], alpha_m[None, :])
            delta_alphas1m = delta_alpha2(alpha_1[:, None], alpha_m[None, :])
            delta_alphasj2 = delta_alpha2(alpha_j[:, None], alpha_2[None, :])
            # distances of atoms wrt to the central one and wrt each other in 1 and 2
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            # Get the squared exponential kernels
            se_jm = T.exp(-(r1j[:, None] - r2m[None, :]) ** 2 / (2 * sig ** 2))
            # Define cutoff function.
            # BUGFIX: the hard-cutoff step factors must be broadcast onto the
            # (j, m) pair matrix like the cosine factors — r1j along rows,
            # r2m along columns — exactly as in the single-species kernel.
            # Without the None-indexing the (j,) and (m,) vectors broadcast
            # along the wrong axis (shape error or wrong masking).
            cut_jm = 0.5*(1+T.cos(np.pi*r1j[:, None]/rc))*0.5*(1+T.cos(np.pi*r2m[None, :]/rc))*(
                (T.sgn(rc-r1j[:, None]) + 1) / 2)*((T.sgn(rc-r2m[None, :]) + 1) / 2)
            # Apply cutoffs and chemical species masks
            se_jm = se_jm*cut_jm * \
                (delta_alphas12 * delta_alphasjm + delta_alphas1m * delta_alphasj2)
            ker = T.sum(se_jm)
            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------
            # global energy energy kernel
            k_ee_fun = function([r1, r2, rho1, rho2, sig, theta, rc], ker,
                                allow_input_downcast=False, on_unused_input='warn')
            # energy force kernel - Used to predict energies from forces
            k_ef = T.grad(ker, r2)
            k_ef_fun = function([r1, r2, rho1, rho2, sig, theta, rc], k_ef,
                                allow_input_downcast=False, on_unused_input='warn')
            # force force kernel - it uses only local atom pairs to avoid useless computation
            k_ff = T.grad(ker, r1)
            # Hessian wrt both central positions, built row by row with scan.
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function([r1, r2, rho1, rho2, sig, theta, rc], k_ff_der,
                                allow_input_downcast=False, on_unused_input='warn')
            # Save the functions that we want to use for multiprocessing.
            # This is necessary because theano does not want to access the
            # automatically stored compiled object from different processes.
            with open(Mffpath / 'k2_ee_m.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k2_ef_m.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k2_ff_m.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
        else:
            print("Loading Kernels")
            with open(Mffpath / "k2_ee_m.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k2_ef_m.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k2_ff_m.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)

        # --------------------------------------------------
        # WRAPPERS (we don't want to plug the position of the central element every time)
        # --------------------------------------------------
        def k2_ee(conf1, conf2, sig, theta, rc):
            """
            Two body kernel for global energy-energy correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (float): scalar valued energy-energy 2-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k2_ef(conf1, conf2, sig, theta, rc):
            """
            Two body kernel for global energy-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (array): 3x1 energy-force 2-body kernel
            """
            # Sign flip: force is minus the gradient of the energy.
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k2_ff(conf1, conf2, sig, theta, rc):
            """
            Two body kernel for force-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (matrix): 3x3 force-force 2-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        logger.info("Ended compilation of theano two body kernels")
        return k2_ee, k2_ef, k2_ff
| 36,239 | 39.995475 | 121 | py |
mff | mff-master/mff/kernels/threebodykernel.py | # -*- coding: utf-8 -*-
import logging
import os.path
import pickle
from abc import ABCMeta, abstractmethod
import numpy as np
from mff.kernels.base import Kernel, Mffpath
logger = logging.getLogger(__name__)
def dummy_calc_ff(data):
    """ Function used when multiprocessing.

    Computes a chunk of 3-body force-force kernel values using the
    precompiled theano kernel function cached on disk.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values: the chunk of
            configuration pairs, the three kernel hyperparameters and
            the kernel type ("single" or "multi")

    Returns:
        result (array): the computed kernel values, shape (len(array), 3, 3)

    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    # Resolve the cached kernel pickle up front; failing fast on an unknown
    # type avoids an obscure NameError on 'fun' further down.
    pickle_names = {"single": "k3_ff_s.pickle", "multi": "k3_ff_m.pickle"}
    if kertype not in pickle_names:
        raise ValueError(
            "kernel type must be 'single' or 'multi', got %r" % (kertype,))
    with open(Mffpath / pickle_names[kertype], 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3, 3))
    # The two central-atom positions are always the origin; the compiled
    # kernel operates on the relative neighbour positions in each pair.
    for i in range(len(array)):
        result[i] = fun(np.zeros(3), np.zeros(3), array[i][0],
                        array[i][1], theta0, theta1, theta2)
    return result
def dummy_calc_ee(data):
    """ Function used when multiprocessing.

    Computes a chunk of 3-body energy-energy kernel values using the
    precompiled theano kernel function cached on disk.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values: the chunk of
            configuration pairs, the three kernel hyperparameters, the
            kernel type ("single" or "multi") and the mapping flag

    Returns:
        result (array): the computed kernel values, shape (len(array),)

    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype, mapping = data
    # Resolve the cached kernel pickle up front; failing fast on an unknown
    # type avoids an obscure NameError on 'fun' further down.
    pickle_names = {"single": "k3_ee_s.pickle", "multi": "k3_ee_m.pickle"}
    if kertype not in pickle_names:
        raise ValueError(
            "kernel type must be 'single' or 'multi', got %r" % (kertype,))
    with open(Mffpath / pickle_names[kertype], 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros(len(array))
    if not mapping:
        # Global energy-energy: sum the local kernel over every pair of
        # local environments, with a 1/9 prefactor.
        for i in range(len(array)):
            for conf1 in array[i][0]:
                for conf2 in array[i][1]:
                    result[i] += 1/9.0*fun(np.zeros(3), np.zeros(3),
                                           conf1, conf2, theta0, theta1, theta2)
    else:
        # Mapping mode: the first element of each pair is a single local
        # environment, so only the second one is iterated (1/3 prefactor).
        for i in range(len(array)):
            for conf2 in array[i][1]:
                result[i] += 1/3.0*fun(np.zeros(3), np.zeros(3),
                                       array[i][0], conf2, theta0, theta1, theta2)
    return result
def dummy_calc_ef(data):
    """ Function used when multiprocessing.

    Computes a chunk of 3-body energy-force kernel values using the
    precompiled theano kernel function cached on disk.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values: the chunk of
            configuration pairs, the three kernel hyperparameters, the
            kernel type ("single" or "multi") and the mapping flag

    Returns:
        result (array): the computed kernel values, shape (len(array), 3)

    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype, mapping = data
    # Resolve the cached kernel pickle up front; failing fast on an unknown
    # type avoids an obscure NameError on 'fun' further down.
    pickle_names = {"single": "k3_ef_s.pickle", "multi": "k3_ef_m.pickle"}
    if kertype not in pickle_names:
        raise ValueError(
            "kernel type must be 'single' or 'multi', got %r" % (kertype,))
    with open(Mffpath / pickle_names[kertype], 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3))
    if not mapping:
        # Global energy vs force: sum the (negated) kernel gradient over the
        # local environments of the first configuration, with a 1/3 prefactor.
        for i in range(len(array)):
            conf2 = np.array(array[i][1], dtype='float')
            for conf1 in array[i][0]:
                conf1 = np.array(conf1, dtype='float')
                result[i] += -1/3.0*fun(np.zeros(3), np.zeros(3),
                                        conf1, conf2, theta0, theta1, theta2)
    else:
        # Mapping mode: single local environment on both sides, no prefactor.
        for i in range(len(array)):
            conf2 = np.array(array[i][1], dtype='float')
            conf1 = np.array(array[i][0], dtype='float')
            result[i] += -fun(np.zeros(3), np.zeros(3), conf1,
                              conf2, theta0, theta1, theta2)
    return result
class BaseThreeBody(Kernel, metaclass=ABCMeta):
    """ Three body kernel class
    Handles the functions common to the single-species and
    multi-species three-body kernels.

    Args:
        kernel_name (str): To choose between single- and two-species kernel
        theta[0] (float) : lengthscale of the kernel
        theta[1] (float) : decay rate of the cutoff function
        theta[2] (float) : cutoff radius
        bounds (list) : bounds of the kernel function.

    Attributes:
        k3_ee (object): Energy-energy kernel function
        k3_ef (object): Energy-force kernel function
        k3_ef_loc (object): Local Energy-force kernel function
        k3_ff (object): Force-force kernel function
    """

    @abstractmethod
    def __init__(self, kernel_name, theta, bounds):
        super().__init__(kernel_name)
        self.theta = theta
        self.bounds = bounds
        # Concrete subclasses implement compile_theano(); it returns the three
        # compiled kernel callables used by every calc_* method below.
        self.k3_ee, self.k3_ef, self.k3_ff = self.compile_theano()

    def calc(self, X1, X2, ncores=1):
        """
        Calculate the force-force kernel between two sets of configurations.

        Args:
            X1 (list): list of N1 Mx5 arrays containing xyz coordinates and atomic species
            X2 (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores used; > 1 enables multiprocessing

        Returns:
            K (matrix): N1*3 x N2*3 matrix of the matrix-valued kernels
        """
        ker = np.zeros((len(X1) * 3, len(X2) * 3))
        if ncores > 1:
            # Build the full list of configuration pairs once; each worker
            # processes a contiguous slice of it.
            confs = [[x1, x2] for x1 in X1 for x2 in X2]
            n = len(confs)
            import sys
            # Deep recursion limit needed to pickle the compiled theano kernels.
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body force-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in range(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in range(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            del confs
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            # NOTE(review): the module-level dummy_calc_ff defined earlier in
            # this file loads "k2_ff_*" pickles, while compile_theano() in the
            # subclasses below saves "k3_ff_*" pickles — verify the
            # multiprocessing path loads the matching three-body kernels.
            result = pool.map(dummy_calc_ff, clist)
            del clist
            pool.close()
            pool.join()
            result = np.concatenate(result).reshape((n, 3, 3))
            # Scatter the flat list of 3x3 blocks back into the kernel matrix.
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i * 3: i * 3 + 3, 3 * j:3 * j +
                        3] = result[(j + i * len(X2))]
            del result
        else:
            for i, conf1 in enumerate(X1):
                for j, conf2 in enumerate(X2):
                    ker[i * 3:i * 3 + 3, 3 * j:3 * j + 3] += self.k3_ff(
                        conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker

    def calc_ef(self, X_glob, X, ncores=1, mapping=False):
        """
        Calculate the energy-force kernel between two sets of configurations.

        Args:
            X_glob (list): list of N1 global environments; when mapping is
                False each entry is itself a list of local Mx5 configurations,
                when mapping is True each entry is a single Mx5 configuration
            X (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores used; > 1 enables multiprocessing
            mapping (bool): selects whether the 1/3 local-contribution
                prefactor is applied (False) or not (True)

        Returns:
            K (matrix): N1 x N2*3 matrix of the vector-valued kernels
        """
        ker = np.zeros((len(X_glob), len(X) * 3))
        if ncores > 1:
            confs = [[x1, x2] for x1 in X_glob for x2 in X]
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body energy-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in range(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type, mapping] for i in range(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            del confs
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ef, clist)
            del clist
            pool.close()
            pool.join()
            result = np.vstack(np.asarray(result))
            for i in range(len(X_glob)):
                for j in range(len(X)):
                    ker[i, 3 * j:3 * j + 3] = result[(j + i * len(X))]
        else:
            if not mapping:
                # Sum over all local environments of each global one, with the
                # 1/3 prefactor used for three-body energy partitioning.
                for i, x1 in enumerate(X_glob):
                    for j, conf2 in enumerate(X):
                        for conf1 in x1:
                            ker[i, 3 * j:3 * j + 3] += 1/3.0*self.k3_ef(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            else:
                for i, conf1 in enumerate(X_glob):
                    for j, conf2 in enumerate(X):
                        ker[i, 3 * j:3 * j + 3] += self.k3_ef(
                            conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker

    def calc_ee(self, X1, X2, ncores=1, mapping=False):
        """
        Calculate the energy-energy kernel between two global environments.

        Args:
            X1 (list): list of N1 global environments (lists of Mx5 arrays
                when mapping is False, single Mx5 arrays when mapping is True)
            X2 (list): list of N2 global environments (lists of Mx5 arrays)
            ncores (int): number of CPU cores used; > 1 enables multiprocessing
            mapping (bool): selects the 1/9 (False) or 1/3 (True) prefactor

        Returns:
            K (matrix): N1 x N2 matrix of the scalar-valued kernels
        """
        if ncores > 1:  # Used for multiprocessing
            # Build a list of all input pairs which matrix needs to be computed
            confs = [[x1, x2] for x1 in X1 for x2 in X2]
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body energy-energy kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in range(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type, mapping] for i in range(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            del confs
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ee, clist)
            del clist
            pool.close()
            pool.join()
            result = np.concatenate(result).ravel()
            ker = np.zeros((len(X1), len(X2)))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i, j] = result[j + i*len(X2)]
            del result
        else:
            if not mapping:
                ker = np.zeros((len(X1), len(X2)))
                # Double sum over local environments, 1/9 = (1/3)^2 prefactor.
                for i, x1 in enumerate(X1):
                    for j, x2 in enumerate(X2):
                        for conf1 in x1:
                            for conf2 in x2:
                                ker[i, j] += 1/9.0*self.k3_ee(conf1, conf2,
                                                              self.theta[0], self.theta[1], self.theta[2])
            else:
                ker = np.zeros((len(X1), len(X2)))
                for i, conf1 in enumerate(X1):
                    for j, x2 in enumerate(X2):
                        for conf2 in x2:
                            ker[i, j] += 1/3.0*self.k3_ee(conf1, conf2,
                                                          self.theta[0], self.theta[1], self.theta[2])
        return ker

    def calc_gram(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the force-force gram matrix for a set of configurations X.

        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix

        Returns:
            gram (matrix): N*3 x N*3 gram matrix of the matrix-valued kernels

        Raises:
            NotImplementedError: if eval_gradient is True.
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:
                # Only the lower triangle (diagonal included) is computed;
                # the matrix is symmetrised at the end.
                confs = [[X[i], X[j]] for i in range(len(X)) for j in range(i + 1)]
                n = len(confs)
                logger.info(
                    'Using %i cores for the 3-body force-force gram matrix calculation' % (ncores))
                import sys
                sys.setrecursionlimit(100000)
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in range(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in range(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                del confs
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ff, clist)
                del clist
                pool.close()
                pool.join()
                result = np.concatenate(result).reshape((n, 3, 3))
                off_diag = np.zeros((len(X) * 3, len(X) * 3))
                diag = np.zeros((len(X) * 3, len(X) * 3))
                for i in range(len(X)):
                    # j + i*(i+1)//2 indexes the flattened lower triangle.
                    diag[3 * i:3 * i + 3, 3 * i:3 * i +
                         3] = result[i + i * (i + 1) // 2]
                    for j in range(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 *
                                 j + 3] = result[j + i * (i + 1) // 2]
                del result
            else:
                diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                off_diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                for i in range(X.shape[0]):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i + 3] = \
                        self.k3_ff(X[i], X[i], self.theta[0],
                                   self.theta[1], self.theta[2])
                    for j in range(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 * j + 3] = \
                            self.k3_ff(X[i], X[j], self.theta[0],
                                       self.theta[1], self.theta[2])
            # Symmetrise: the strict lower triangle is mirrored to the upper.
            gram = diag + off_diag + off_diag.T
            del diag, off_diag
            return gram

    def calc_gram_e(self, X, ncores=1, eval_gradient=False):  # Untested
        """
        Calculate the energy-energy gram matrix for a set of configurations X.

        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix

        Returns:
            gram (matrix): N x N gram matrix of the scalar-valued kernels

        Raises:
            NotImplementedError: if eval_gradient is True.
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:
                # Build a list of all input pairs which matrix needs to be computed
                confs = [[X[i], X[j]] for i in range(len(X)) for j in range(i + 1)]
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the 3-body energy-energy gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in range(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type, False] for i in range(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                del confs
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ee, clist)
                del clist
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                off_diag = np.zeros((len(X), len(X)))
                diag = np.zeros((len(X), len(X)))
                for i in range(len(X)):
                    diag[i, i] = result[i + i * (i + 1) // 2]
                    for j in range(i):
                        off_diag[i, j] = result[j + i * (i + 1) // 2]
                del result
            else:
                diag = np.zeros((X.shape[0], X.shape[0]))
                off_diag = np.zeros((X.shape[0], X.shape[0]))
                for i in range(X.shape[0]):
                    for k, conf1 in enumerate(X[i]):
                        diag[i, i] += 1/9.0*self.k3_ee(conf1, conf1,
                                                       self.theta[0], self.theta[1], self.theta[2])
                        for conf2 in X[i][:k]:
                            # *2 here to speed up the loop
                            diag[i, i] += 1/9.0*2.0*self.k3_ee(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
                    for j in range(i):
                        for conf1 in X[i]:
                            for conf2 in X[j]:
                                off_diag[i, j] += 1/9.0*self.k3_ee(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            gram = diag + off_diag + off_diag.T
            del diag, off_diag
            return gram

    def calc_gram_ef(self, X, X_glob, ncores=1, eval_gradient=False):
        """
        Calculate the energy-force gram matrix for a set of configurations X.
        This returns a non-symmetric matrix which is equal to the transpose of
        the force-energy gram matrix.

        Args:
            X (list): list of N1 M1x5 arrays containing xyz coordinates and atomic species
            X_glob (list): list of N2 M2x5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix

        Returns:
            gram (matrix): N2 x N1*3 gram matrix of the vector-valued kernels

        Raises:
            NotImplementedError: if eval_gradient is True.
        """
        gram = np.zeros((X_glob.shape[0], X.shape[0] * 3))
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Multiprocessing
                confs = [[x1, x2] for x1 in X_glob for x2 in X]
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the 3-body energy-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in range(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type, False] for i in range(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                del confs
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ef, clist)
                del clist
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                # Each pair contributes a 3-vector stored contiguously.
                for i in range(X_glob.shape[0]):
                    for j in range(X.shape[0]):
                        gram[i, 3 * j:3 * j + 3] = result[3 *
                                                          (j + i * X.shape[0]):3 + 3*(j + i * X.shape[0])]
                del result
            else:
                for i in range(X_glob.shape[0]):
                    for j in range(X.shape[0]):
                        for k in X_glob[i]:
                            gram[i, 3 * j:3 * j + 3] += 1/3.0*self.k3_ef(
                                k, X[j], self.theta[0], self.theta[1], self.theta[2])
            return gram

    @staticmethod
    @abstractmethod
    def compile_theano():
        # Implemented by subclasses; must return (k3_ee, k3_ef, k3_ff).
        return None, None, None
class ThreeBodySingleSpeciesKernel(BaseThreeBody):
    """Three body single species kernel.

    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """

    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='ThreeBodySingleSpecies', theta=theta, bounds=bounds)
        # Tag used to select the correct cached pickle in the worker functions.
        self.type = "single"

    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for energy and force learning
        ker_jkmn_withcutoff = ker_jkmn #* cutoff_ikmn

        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5

        Returns:
            k3_ee (func): energy-energy kernel
            k3_ef (func): energy-force kernel
            k3_ff (func): force-force kernel
        """
        # Compile only if the cached pickles are not all present on disk.
        if not (os.path.exists(Mffpath / 'k3_ee_s.pickle') and
                os.path.exists(Mffpath / 'k3_ef_s.pickle') and os.path.exists(Mffpath / 'k3_ff_s.pickle')):
            print("Building Kernels")

            import theano.tensor as T
            from theano import function, scan
            logger.info("Started compilation of theano three body kernels")

            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')

            # positions of neighbours without chemical species
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]

            # --------------------------------------------------
            # RELATIVE DISTANCES TO CENTRAL VECTOR AND BETWEEN NEIGHBOURS
            # --------------------------------------------------
            # first and second configuration
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            rjk = T.sqrt(
                T.sum((rho1s[None, :, :] - rho1s[:, None, :]) ** 2, axis=2))
            rmn = T.sqrt(
                T.sum((rho2s[None, :, :] - rho2s[:, None, :]) ** 2, axis=2))

            # --------------------------------------------------
            # BUILD THE KERNEL
            # --------------------------------------------------
            # Squared exp of differences
            se_1j2m = T.exp(-(r1j[:, None] - r2m[None, :])
                            ** 2 / (2 * sig ** 2))
            se_jkmn = T.exp(-(rjk[:, :, None, None] -
                              rmn[None, None, :, :]) ** 2 / (2 * sig ** 2))
            se_jk2m = T.exp(-(rjk[:, :, None] -
                              r2m[None, None, :]) ** 2 / (2 * sig ** 2))
            se_1jmn = T.exp(-(r1j[:, None, None] -
                              rmn[None, :, :]) ** 2 / (2 * sig ** 2))

            # Kernel not summed (cyclic permutations)
            k1n = (se_1j2m[:, None, :, None] *
                   se_1j2m[None, :, None, :] * se_jkmn)
            k2n = (se_1jmn[:, None, :, :] * se_jk2m[:, :,
                                                    None, :] * se_1j2m[None, :, :, None])
            k3n = (se_1j2m[:, None, None, :] *
                   se_jk2m[:, :, :, None] * se_1jmn[None, :, :, :])

            # final shape is M1 M1 M2 M2
            ker = k1n + k2n + k3n

            # Cosine cutoff, forced to zero beyond rc by the sign-step factor.
            cut_j = 0.5*(1+T.cos(np.pi*r1j/rc))*((T.sgn(rc-r1j) + 1) / 2)
            cut_m = 0.5*(1+T.cos(np.pi*r2m/rc))*((T.sgn(rc-r2m) + 1) / 2)
            cut_jk = cut_j[:,None]*cut_j[None,:]*0.5*(1+T.cos(np.pi*rjk/rc))*((T.sgn(rc-rjk) + 1) / 2)
            cut_mn = cut_m[:,None]*cut_m[None,:]*0.5*(1+T.cos(np.pi*rmn/rc))*((T.sgn(rc-rmn) + 1) / 2)

            # --------------------------------------------------
            # REMOVE DIAGONAL ELEMENTS AND ADD CUTOFF
            # --------------------------------------------------
            # remove diagonal elements AND lower triangular ones from first configuration
            mask_jk = T.triu(T.ones_like(rjk)) - T.identity_like(rjk)
            # remove diagonal elements from second configuration
            mask_mn = T.ones_like(rmn) - T.identity_like(rmn)
            # Combine masks
            mask_jkmn = mask_jk[:, :, None, None] * mask_mn[None, None, :, :]

            # Apply mask and then apply cutoff functions
            ker = ker * mask_jkmn
            ker = T.sum(ker * cut_jk[:, :, None, None]
                        * cut_mn[None, None, :, :])

            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------
            # global energy energy kernel
            k_ee_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], ker, on_unused_input='ignore')

            # global energy force kernel
            k_ef = T.grad(ker, r2)
            k_ef_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ef, on_unused_input='ignore')

            # local force force kernel
            k_ff = T.grad(ker, r1)
            # Second derivative w.r.t. r2, one row of the Hessian per scan step.
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ff_der, on_unused_input='ignore')

            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # automatically stored compiled object from different processes
            with open(Mffpath / 'k3_ee_s.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k3_ef_s.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k3_ff_s.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
        else:
            print("Loading Kernels")
            with open(Mffpath / "k3_ee_s.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k3_ef_s.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k3_ff_s.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)

        # WRAPPERS (we don't want to plug the position of the central element every time)
        def k3_ee(conf1, conf2, sig, theta, rc):
            """
            Three body kernel for global energy-energy correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (float): scalar valued energy-energy 3-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k3_ef(conf1, conf2, sig, theta, rc):
            """
            Three body kernel for global energy-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (array): 3x1 energy-force 3-body kernel
            """
            # Sign flip: force is minus the gradient of the energy kernel.
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k3_ff(conf1, conf2, sig, theta, rc):
            """
            Three body kernel for local force-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (matrix): 3x3 force-force 3-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        logger.info("Ended compilation of theano three body kernels")

        return k3_ee, k3_ef, k3_ff
class ThreeBodyManySpeciesKernel(BaseThreeBody):
    """Three body many species kernel.

    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """

    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='ThreeBodyManySpecies', theta=theta, bounds=bounds)
        # Tag used to select the correct cached pickle in the worker functions.
        self.type = "multi"

    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for energy and force learning
        ker_jkmn_withcutoff = ker_jkmn #* cutoff_ikmn

        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5

        Returns:
            k3_ee (func): energy-energy kernel
            k3_ef (func): energy-force kernel
            k3_ff (func): force-force kernel
        """
        logger.info("Started compilation of theano three body kernels")
        # Compile only if the cached pickles are not all present on disk.
        if not (os.path.exists(Mffpath / 'k3_ee_m.pickle') and
                os.path.exists(Mffpath / 'k3_ef_m.pickle') and os.path.exists(Mffpath / 'k3_ff_m.pickle')):
            print("Building Kernels")

            import theano.tensor as T
            from theano import function, scan

            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')

            # positions of neighbours without chemical species
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]

            # Columns 3 and 4 carry the chemical species of the central atom
            # and of the neighbour, respectively.
            alpha_1 = rho1[:, 3].flatten()
            alpha_2 = rho2[:, 3].flatten()

            alpha_j = rho1[:, 4].flatten()
            alpha_m = rho2[:, 4].flatten()

            alpha_k = rho1[:, 4].flatten()
            alpha_n = rho2[:, 4].flatten()

            # --------------------------------------------------
            # RELATIVE DISTANCES TO CENTRAL VECTOR AND BETWEEN NEIGHBOURS
            # --------------------------------------------------
            # first and second configuration
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            rjk = T.sqrt(
                T.sum((rho1s[None, :, :] - rho1s[:, None, :]) ** 2, axis=2))
            rmn = T.sqrt(
                T.sum((rho2s[None, :, :] - rho2s[:, None, :]) ** 2, axis=2))

            # --------------------------------------------------
            # CHEMICAL SPECIES MASK
            # --------------------------------------------------
            # numerical kronecker
            def delta_alpha2(a1j, a2m):
                # Narrow Gaussian acting as a differentiable Kronecker delta.
                d = np.exp(-(a1j - a2m) ** 2 / (2 * 0.00001 ** 2))
                return d

            # permutation 1
            delta_alphas12 = delta_alpha2(alpha_1[0], alpha_2[0])
            delta_alphasjm = delta_alpha2(alpha_j[:, None], alpha_m[None, :])
            delta_alphas_jmkn = delta_alphasjm[:, None,
                                               :, None] * delta_alphasjm[None, :, None, :]
            delta_perm1 = delta_alphas12 * delta_alphas_jmkn

            # permutation 3
            delta_alphas1m = delta_alpha2(
                alpha_1[0, None], alpha_m[None, :]).flatten()
            delta_alphasjn = delta_alpha2(alpha_j[:, None], alpha_n[None, :])
            delta_alphask2 = delta_alpha2(
                alpha_k[:, None], alpha_2[None, 0]).flatten()
            delta_perm3 = delta_alphas1m[None, None, :, None] * delta_alphasjn[:, None, None, :] * \
                delta_alphask2[None, :, None, None]

            # permutation 5
            delta_alphas1n = delta_alpha2(
                alpha_1[0, None], alpha_n[None, :]).flatten()
            delta_alphasj2 = delta_alpha2(
                alpha_j[:, None], alpha_2[None, 0]).flatten()
            delta_alphaskm = delta_alpha2(alpha_k[:, None], alpha_m[None, :])
            delta_perm5 = delta_alphas1n[None, None, None, :] * delta_alphaskm[None, :, :, None] * \
                delta_alphasj2[:, None, None, None]

            # --------------------------------------------------
            # BUILD THE KERNEL
            # --------------------------------------------------
            # Squared exp of differences
            se_1j2m = T.exp(-(r1j[:, None] - r2m[None, :])
                            ** 2 / (2 * sig ** 2))
            se_jkmn = T.exp(-(rjk[:, :, None, None] -
                              rmn[None, None, :, :]) ** 2 / (2 * sig ** 2))
            se_jk2m = T.exp(-(rjk[:, :, None] -
                              r2m[None, None, :]) ** 2 / (2 * sig ** 2))
            se_1jmn = T.exp(-(r1j[:, None, None] -
                              rmn[None, :, :]) ** 2 / (2 * sig ** 2))

            # Kernel not summed (cyclic permutations)
            k1n = (se_1j2m[:, None, :, None] *
                   se_1j2m[None, :, None, :] * se_jkmn)
            k2n = (se_1jmn[:, None, :, :] * se_jk2m[:, :,
                                                    None, :] * se_1j2m[None, :, :, None])
            k3n = (se_1j2m[:, None, None, :] *
                   se_jk2m[:, :, :, None] * se_1jmn[None, :, :, :])

            # final shape is M1 M1 M2 M2
            ker_loc = k1n * delta_perm1 + k2n * delta_perm3 + k3n * delta_perm5

            # Faster version of cutoff (less calculations)
            cut_j = 0.5*(1+T.cos(np.pi*r1j/rc))#*((T.sgn(rc-r1j) + 1) / 2)
            cut_m = 0.5*(1+T.cos(np.pi*r2m/rc))#*((T.sgn(rc-r2m) + 1) / 2)
            cut_jk = cut_j[:,None]*cut_j[None,:]*0.5*(1+T.cos(np.pi*rjk/rc))*((T.sgn(rc-rjk) + 1) / 2)
            cut_mn = cut_m[:,None]*cut_m[None,:]*0.5*(1+T.cos(np.pi*rmn/rc))*((T.sgn(rc-rmn) + 1) / 2)

            # --------------------------------------------------
            # REMOVE DIAGONAL ELEMENTS
            # --------------------------------------------------
            # remove diagonal elements AND lower triangular ones from first configuration
            mask_jk = T.triu(T.ones_like(rjk)) - T.identity_like(rjk)
            # remove diagonal elements from second configuration
            mask_mn = T.ones_like(rmn) - T.identity_like(rmn)
            # Combine masks
            mask_jkmn = mask_jk[:, :, None, None] * mask_mn[None, None, :, :]

            # Apply mask and then apply cutoff functions
            ker_loc = ker_loc * mask_jkmn
            ker_loc = T.sum(
                ker_loc * cut_jk[:, :, None, None] * cut_mn[None, None, :, :])

            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------
            # energy energy kernel
            k_ee_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], ker_loc, on_unused_input='ignore')

            # energy force kernel
            k_ef_cut = T.grad(ker_loc, r2)
            k_ef_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ef_cut, on_unused_input='ignore')

            # force force kernel
            k_ff_cut = T.grad(ker_loc, r1)
            # Second derivative w.r.t. r2, one row of the Hessian per scan step.
            k_ff_cut_der, updates = scan(lambda j, k_ff_cut, r2: T.grad(k_ff_cut[j], r2),
                                         sequences=T.arange(k_ff_cut.shape[0]), non_sequences=[k_ff_cut, r2])
            k_ff_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ff_cut_der, on_unused_input='ignore')

            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # automatically stored compiled object from different processes
            with open(Mffpath / 'k3_ee_m.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k3_ef_m.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k3_ff_m.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
        else:
            print("Loading Kernels")
            with open(Mffpath / "k3_ee_m.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k3_ef_m.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k3_ff_m.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)

        # WRAPPERS (we don't want to plug the position of the central element every time)
        def k3_ee(conf1, conf2, sig, theta, rc):
            """
            Three body kernel for energy-energy correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (float): scalar valued energy-energy 3-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k3_ef(conf1, conf2, sig, theta, rc):
            """
            Three body kernel for energy-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (array): 3x1 energy-force 3-body kernel
            """
            # Sign flip: force is minus the gradient of the energy kernel.
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def k3_ff(conf1, conf2, sig, theta, rc):
            """
            Three body kernel for force-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (matrix): 3x3 force-force 3-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        logger.info("Ended compilation of theano three body kernels")

        return k3_ee, k3_ef, k3_ff
| 40,850 | 39.728814 | 117 | py |
mff | mff-master/mff/kernels/eamkernel.py | # -*- coding: utf-8 -*-
import logging
import os.path
import pickle
from abc import ABCMeta, abstractmethod
import numpy as np
from mff.kernels.base import Kernel, Mffpath
logger = logging.getLogger(__name__)
def dummy_calc_ff(data):
    """ Function used when multiprocessing.

    Evaluates the cached EAM force-force kernel on a batch of
    configuration pairs inside a worker process.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values

    Returns:
        result (array): the computed kernel values
    """
    pairs, theta0, theta1, theta2, kertype = data
    # Each worker re-loads the compiled kernel from disk because the
    # in-memory compiled object cannot be shared across processes.
    if kertype == "single":
        kernel_path = Mffpath / "keam_ff_s.pickle"
    elif kertype == "multi":
        kernel_path = Mffpath / "keam_ff_m.pickle"
    with open(kernel_path, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(pairs), 3, 3))
    for i, pair in enumerate(pairs):
        result[i] = fun(np.zeros(3), np.zeros(3), pair[0],
                        pair[1], theta0, theta1, theta2)
    return result
def dummy_calc_ee(data):
    """ Function used when multiprocessing.

    Evaluates the cached EAM energy-energy kernel on a batch of
    configuration pairs inside a worker process.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values

    Returns:
        result (array): the computed kernel values
    """
    pairs, theta0, theta1, theta2, kertype, mapping, alpha_1_descr = data
    # Mapped ("descriptor") kernels live in the *_eed_* pickles, the plain
    # energy-energy kernels in *_ee_*; workers must re-load them from disk.
    prefix = "keam_eed_" if mapping else "keam_ee_"
    if kertype == "single":
        fname = prefix + "s.pickle"
    elif kertype == "multi":
        fname = prefix + "m.pickle"
    with open(Mffpath / fname, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros(len(pairs))
    if not mapping:
        # Double loop over local environments with the 0.25 prefactor.
        for i, pair in enumerate(pairs):
            for conf1 in pair[0]:
                for conf2 in pair[1]:
                    result[i] += 0.25*fun(np.zeros(3), np.zeros(3), conf1,
                                          conf2, theta0, theta1, theta2)
    elif kertype == "multi":
        # Mapped multi-species kernel takes conf1 directly plus the
        # descriptor species tag.
        for i, pair in enumerate(pairs):
            for conf2 in pair[1]:
                result[i] += 0.5*fun(np.zeros(3), pair[0], conf2,
                                     theta0, theta1, theta2, alpha_1_descr)
    else:
        for i, pair in enumerate(pairs):
            for conf2 in pair[1]:
                result[i] += fun(np.zeros(3), pair[0], conf2,
                                 theta0, theta1, theta2)
    return result
def dummy_calc_ef(data):
    """ Function used when multiprocessing.

    Evaluates the cached EAM energy-force kernel on a batch of
    configuration pairs inside a worker process.

    Args:
        data (list of objects): contains all the information required
            for the computation of the kernel values

    Returns:
        result (array): the computed kernel values
    """
    pairs, theta0, theta1, theta2, kertype, mapping, alpha_1_descr = data
    # Mapped ("descriptor") kernels live in the *_efd_* pickles, the plain
    # energy-force kernels in *_ef_*; workers must re-load them from disk.
    prefix = "keam_efd_" if mapping else "keam_ef_"
    if kertype == "single":
        fname = prefix + "s.pickle"
    elif kertype == "multi":
        fname = prefix + "m.pickle"
    with open(Mffpath / fname, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(pairs), 3))
    if not mapping:
        # Sum over local environments with the -0.5 prefactor; the minus
        # converts the energy-kernel gradient into a force kernel.
        for i, pair in enumerate(pairs):
            conf2 = np.array(pair[1], dtype='float')
            for conf1 in pair[0]:
                conf1 = np.array(conf1, dtype='float')
                result[i] += -0.5*fun(np.zeros(3), np.zeros(3), conf1,
                                      conf2, theta0, theta1, theta2)
    elif kertype == "multi":
        for i, pair in enumerate(pairs):
            conf2 = np.array(pair[1], dtype='float')
            conf1 = np.array(pair[0], dtype='float')
            result[i] += -fun(np.zeros(3), conf1,
                              conf2, theta0, theta1, theta2, alpha_1_descr)
    else:
        for i, pair in enumerate(pairs):
            conf2 = np.array(pair[1], dtype='float')
            conf1 = np.array(pair[0], dtype='float')
            result[i] += -fun(np.zeros(3), conf1,
                              conf2, theta0, theta1, theta2)
    return result
class BaseEam(Kernel, metaclass=ABCMeta):
""" Eam kernel class
Handles the functions common to the single-species and
multi-species two-body kernels.
Args:
kernel_name (str): To choose between single- and two-species kernel
theta[0] (float) : lengthscale of the kernel
theta[1] (float) : decay rate of the cutoff function
theta[2] (float) : cutoff radius
bounds (list) : bounds of the kernel function.
Attributes:
k2_ee (object): Energy-energy kernel function
k2_ef (object): Energy-force kernel function
k2_ff (object): Force-force kernel function
"""
@abstractmethod
def __init__(self, kernel_name, theta, bounds):
super().__init__(kernel_name)
self.theta = theta
self.bounds = bounds
self.k2_ee, self.k2_ef, self.k2_ff, self.k2_ee_d, self.k2_ef_d = self.compile_theano()
    def calc(self, X1, X2, ncores=1):
        """
        Calculate the force-force kernel between two sets of configurations.

        Args:
            X1 (list): list of N1 Mx5 arrays containing xyz coordinates and atomic species
            X2 (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores used; > 1 enables multiprocessing

        Returns:
            K (matrix): N1*3 x N2*3 matrix of the matrix-valued kernels
        """
        ker = np.zeros((len(X1) * 3, len(X2) * 3))
        if ncores > 1:
            # Build every configuration pair once; workers get contiguous slices.
            confs = []
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            # Deep recursion limit needed to pickle the compiled theano kernels.
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the eam force-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ff, clist)
            pool.close()
            pool.join()
            result = np.concatenate(result).reshape((n, 3, 3))
            # Scatter the flat list of 3x3 blocks back into the kernel matrix.
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i * 3: i * 3 + 3, 3 * j:3 * j +
                        3] = result[(j + i * len(X2))]
        else:
            for i, conf1 in enumerate(X1):
                for j, conf2 in enumerate(X2):
                    ker[i * 3:i * 3 + 3, 3 * j:3 * j + 3] += self.k2_ff(
                        conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_ef(self, X_glob, X, ncores=1, mapping=False, alpha_1_descr=0):
        """
        Calculate the energy-force kernel between two sets of configurations.

        Args:
            X_glob (list): list of N1 global environments; when mapping is
                False each entry is a list of local Mx5 configurations, when
                mapping is True each entry is a single configuration
            X (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores used; > 1 enables multiprocessing
            mapping (bool): if True use the descriptor kernel k2_ef_d without
                the 0.5 prefactor
            alpha_1_descr: extra species argument forwarded to the
                multi-species descriptor kernel (default 0)

        Returns:
            K (matrix): N1 x N2*3 matrix of the vector-valued kernels
        """
        ker = np.zeros((len(X_glob), len(X) * 3))
        if ncores > 1:
            confs = []
            for x1 in X_glob:
                for x2 in X:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the eam energy-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type, mapping, alpha_1_descr] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ef, clist)
            pool.close()
            pool.join()
            result = np.vstack(np.asarray(result))
            for i in range(len(X_glob)):
                for j in range(len(X)):
                    ker[i, 3 * j:3 * j + 3] = result[(j + i * len(X))]
        else:
            if not mapping:
                # Sum over local environments with the 0.5 prefactor.
                for i, x1 in enumerate(X_glob):
                    for j, conf2 in enumerate(X):
                        for conf1 in x1:
                            ker[i, 3 * j:3 * j + 3] += 0.5*self.k2_ef(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            else:
                if self.type == 'multi':
                    # Multi-species descriptor kernel needs the extra
                    # alpha_1_descr species argument.
                    for i, conf1 in enumerate(X_glob):
                        for j, conf2 in enumerate(X):
                            ker[i, 3 * j:3 * j + 3] += self.k2_ef_d(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2], alpha_1_descr)
                else:
                    for i, conf1 in enumerate(X_glob):
                        for j, conf2 in enumerate(X):
                            ker[i, 3 * j:3 * j + 3] += self.k2_ef_d(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_ee(self, X1, X2, ncores=1, mapping=False, alpha_1_descr=0):
        """
        Calculate the energy-energy kernel between two global environments.
        Args:
            X1 (list): list of N1 global environments (each a list of Mx5 arrays
                containing xyz coordinates and atomic species), or, when mapping
                is True, a list of N1 precomputed scalar descriptors
            X2 (list): list of N2 global environments of Mx5 arrays
            ncores (int): number of CPU cores used for multiprocessing (default 1)
            mapping (bool): if True, use the descriptor kernel k2_ee_d on X1
            alpha_1_descr (int): element of the central atom, used only when
                mapping is True and the kernel is multi-species
        Returns:
            K (matrix): N1 x N2 matrix of the scalar-valued kernels
        """
        if ncores > 1:  # Used for multiprocessing
            confs = []
            # Build a list of all input pairs which matrix needs to be computed
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the eam energy-energy kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type, mapping, alpha_1_descr] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ee, clist)
            pool.close()
            pool.join()
            result = np.concatenate(result).ravel()
            ker = np.zeros((len(X1), len(X2)))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i, j] = result[j + i*len(X2)]
        else:
            if not mapping:
                # 0.25 = 0.5 * 0.5: half-energy convention applied on both sides
                ker = np.zeros((len(X1), len(X2)))
                for i, x1 in enumerate(X1):
                    for j, x2 in enumerate(X2):
                        for conf1 in x1:
                            for conf2 in x2:
                                ker[i, j] += 0.25*self.k2_ee(conf1, conf2, self.theta[0],
                                                             self.theta[1], self.theta[2])
            else:
                # Descriptor kernel: X1 entries are scalar descriptors here
                if self.type == 'multi':
                    ker = np.zeros((len(X1), len(X2)))
                    for i, conf1 in enumerate(X1):
                        for j, x2 in enumerate(X2):
                            for conf2 in x2:
                                ker[i, j] += 0.5*self.k2_ee_d(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2], alpha_1_descr)
                else:
                    ker = np.zeros((len(X1), len(X2)))
                    for i, conf1 in enumerate(X1):
                        for j, x2 in enumerate(X2):
                            for conf2 in x2:
                                ker[i, j] += 0.5*self.k2_ee_d(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_gram(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the force-force gram matrix for a set of configurations X.
        Only the lower triangle (plus diagonal) is computed; the full matrix
        is recovered by symmetry at the end.
        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N*3 x N*3 gram matrix of the matrix-valued kernels
        Raises:
            NotImplementedError: if eval_gradient is True
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Used for multiprocessing
                confs = []
                # Build a list of all input pairs which matrix needs to be computed
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.asarray([X[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the eam force-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ff, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).reshape((n, 3, 3))
                off_diag = np.zeros((len(X) * 3, len(X) * 3))
                diag = np.zeros((len(X) * 3, len(X) * 3))
                # j + i*(i+1)//2 indexes the flattened lower-triangular pair list
                for i in np.arange(len(X)):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i +
                         3] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 *
                                 j + 3] = result[j + i * (i + 1) // 2]
            else:
                # Serial path; NOTE(review): assumes X supports .shape (ndarray),
                # while the parallel path only needs len(X) — confirm callers
                diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                off_diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                for i in np.arange(X.shape[0]):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i + 3] = \
                        self.k2_ff(X[i], X[i], self.theta[0],
                                   self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 * j + 3] = \
                            self.k2_ff(X[i], X[j], self.theta[0],
                                       self.theta[1], self.theta[2])
        gram = diag + off_diag + off_diag.T  # The gram matrix is symmetric
        return gram
    def calc_gram_e(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the energy-energy gram matrix for a set of configurations X.
        Only the lower triangle (plus diagonal) is computed; the full matrix
        is recovered by symmetry at the end.
        Args:
            X (list): list of N global environments of Mx5 arrays containing
                xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N x N gram matrix of the scalar-valued kernels
        Raises:
            NotImplementedError: if eval_gradient is True
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Used for multiprocessing
                confs = []
                # Build a list of all input pairs which matrix needs to be computed
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.array([list(X[i]), list(X[j])])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the eam energy-energy gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type, False, False] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ee, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                off_diag = np.zeros((len(X), len(X)))
                diag = np.zeros((len(X), len(X)))
                # j + i*(i+1)//2 indexes the flattened lower-triangular pair list
                for i in np.arange(len(X)):
                    diag[i, i] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[i, j] = result[j + i * (i + 1) // 2]
            else:
                # Serial path: 0.25 = 0.5 * 0.5 half-energy factor on both sides
                diag = np.zeros((X.shape[0], X.shape[0]))
                off_diag = np.zeros((X.shape[0], X.shape[0]))
                for i in np.arange(X.shape[0]):
                    for k, conf1 in enumerate(X[i]):
                        diag[i, i] += 0.25*self.k2_ee(conf1, conf1, self.theta[0],
                                                      self.theta[1], self.theta[2])
                        for conf2 in X[i][:k]:
                            # *2 here to speed up the loop
                            diag[i, i] += 0.25*2.0*self.k2_ee(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        for conf1 in X[i]:
                            for conf2 in X[j]:
                                off_diag[i, j] += 0.25*self.k2_ee(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        gram = diag + off_diag + off_diag.T  # Gram matrix is symmetric
        return gram
    def calc_gram_ef(self, X, X_glob, ncores=1, eval_gradient=False):
        """
        Calculate the energy-force gram matrix for a set of configurations X.
        This returns a non-symmetric matrix which is equal to the transpose of
        the force-energy gram matrix. The result is also stored in
        self.gram_ef as a side effect.
        Args:
            X (list): list of N1 M1x5 arrays containing xyz coordinates and atomic species
            X_glob (list): list of N2 global environments of M2x5 arrays
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N2 x N1*3 gram matrix of the vector-valued kernels
        Raises:
            NotImplementedError: if eval_gradient is True
        """
        gram = np.zeros((X_glob.shape[0], X.shape[0] * 3))
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Multiprocessing
                confs = []
                for i in np.arange(len(X_glob)):
                    for j in np.arange(len(X)):
                        thislist = np.asarray([X_glob[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)
                logger.info(
                    'Using %i cores for the eam energy-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type, False, False] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ef, clist)
                pool.close()
                pool.join()
                result = np.vstack(np.asarray(result))
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        gram[i, 3 * j:3 * j + 3] = result[(j + i * X.shape[0])]
            else:
                # 0.5 factor: each local energy counts for half of the bond energy
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        for k in X_glob[i]:
                            gram[i, 3 * j:3 * j + 3] += 0.5*self.k2_ef(
                                k, X[j], self.theta[0], self.theta[1], self.theta[2])
        self.gram_ef = gram
        return gram
@staticmethod
@abstractmethod
def compile_theano():
return None, None, None, None, None
class EamSingleSpeciesKernel(BaseEam):
    """Eam single species kernel.
    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): cutoff radius
        theta[2] (float): radius in the descriptor's exponent
    """
    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-1, 1e2), (1e-1, 1e2))):
        super().__init__(kernel_name='EamSingleSpecies', theta=theta, bounds=bounds)
        self.type = "single"
    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for global energy and force learning
        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5 here called r1 and r2.
        Returns:
            k2_ee (func): energy-energy kernel
            k2_ef (func): energy-force kernel
            k2_ff (func): force-force kernel
            k2_ee_d (func): energy-energy kernel that takes a descriptor as first argument
            k2_ef_d (func): energy-force kernel that takes a descriptor as first argument
        """
        # Compile only if no cached compiled kernels are found on disk
        if not (os.path.exists(Mffpath / 'keam_ee_s.pickle') and
                os.path.exists(Mffpath / 'keam_ef_s.pickle') and os.path.exists(
                    Mffpath / 'keam_ff_s.pickle')
                and os.path.exists(Mffpath / 'keam_eed_s.pickle') and os.path.exists(Mffpath / 'keam_efd_s.pickle')):
            print("Building Kernels")
            import theano.tensor as T
            from theano import function, scan
            logger.info(
                "Started compilation of theano eam single species kernels")
            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # lengthscale hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            rc = T.dscalar('rc')
            # Descriptor as a given input, used to map
            q1_descr = T.dscalar('q1_descr')
            # Radius to use at denominator in the descriptor
            r0 = T.dscalar('r0')
            # positions of neighbours without chemical species (3D space assumed)
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]
            # distances of atoms wrt to the central one and wrt each other in 1 and 2
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            esp_term_1 = (r1j/r0 - 1)
            esp_term_2 = (r2m/r0 - 1)
            # smooth cosine cutoff, identically zero beyond rc
            cut_1 = 0.5*(1 + T.cos(np.pi*r1j/rc))*((T.sgn(rc-r1j) + 1) / 2)
            cut_2 = 0.5*(1 + T.cos(np.pi*r2m/rc))*((T.sgn(rc-r2m) + 1) / 2)
            # scalar descriptors of the two configurations
            q1 = T.sum(T.exp(-esp_term_1)*cut_1)
            q2 = T.sum(T.exp(-esp_term_2)*cut_2)
            # squared-exponential kernel between the two descriptors
            k = T.exp(-(q1-q2)**2/(2*sig**2))
            k_descr = T.exp(-(q1_descr-q2)**2/(2*sig**2))
            # energy energy kernel
            k_ee_fun = function([r1, r2, rho1, rho2, sig, rc, r0], k,
                                allow_input_downcast=False, on_unused_input='warn')
            # energy force kernel - Used to predict energies from forces
            k_ef = T.grad(k, r2)
            k_ef_fun = function([r1, r2, rho1, rho2, sig, rc, r0], k_ef,
                                allow_input_downcast=False, on_unused_input='warn')
            # force force kernel - it uses only local atom pairs to avoid useless computation
            k_ff = T.grad(k, r1)
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function([r1, r2, rho1, rho2, sig, rc, r0], k_ff_der,
                                allow_input_downcast=False, on_unused_input='warn')
            # energy energy descriptor kernel
            k_ee_fun_d = function([r2, q1_descr, rho2, sig, rc, r0], k_descr,
                                  allow_input_downcast=False, on_unused_input='warn')
            # energy force descriptor kernel
            k_ef_descr = T.grad(k_descr, r2)
            k_ef_fun_d = function([r2, q1_descr, rho2, sig, rc, r0], k_ef_descr,
                                  allow_input_downcast=False, on_unused_input='warn')
            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # Automaticallly stored compiled object from different processes
            with open(Mffpath / 'keam_ee_s.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'keam_ef_s.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'keam_ff_s.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
            with open(Mffpath / 'keam_eed_s.pickle', 'wb') as f:
                pickle.dump(k_ee_fun_d, f)
            with open(Mffpath / 'keam_efd_s.pickle', 'wb') as f:
                pickle.dump(k_ef_fun_d, f)
        else:
            print("Loading Kernels")
            with open(Mffpath / "keam_ee_s.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "keam_ef_s.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "keam_ff_s.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)
            with open(Mffpath / 'keam_eed_s.pickle', 'rb') as f:
                k_ee_fun_d = pickle.load(f)
            with open(Mffpath / 'keam_efd_s.pickle', 'rb') as f:
                k_ef_fun_d = pickle.load(f)
        # --------------------------------------------------
        # WRAPPERS (we don't want to plug the position of the central element every time)
        # --------------------------------------------------
        def k2_ee(conf1, conf2, sig, rc, r0):
            """
            Eam kernel for global energy-energy correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (float): scalar valued energy-energy Eam kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, rc, r0)
        def k2_ef(conf1, conf2, sig, rc, r0):
            """
            Eam kernel for global energy-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (array): 3x1 energy-force Eam kernel
            """
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, rc, r0)
        def k2_ff(conf1, conf2, sig, rc, r0):
            """
            Eam kernel for force-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (matrix): 3x3 force-force Eam kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, rc, r0)
        def k2_ee_d(descr1, conf2, sig, rc, r0):
            """
            Eam kernel for global energy-energy correlation, with the first
            configuration replaced by its precomputed scalar descriptor
            Args:
                descr1 (float): descriptor calculated for the first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (float): scalar valued energy-energy Eam kernel
            """
            return k_ee_fun_d(np.zeros(3), descr1, conf2, sig, rc, r0)
        def k2_ef_d(descr1, conf2, sig, rc, r0):
            """
            Eam kernel for global energy-force correlation, with the first
            configuration replaced by its precomputed scalar descriptor
            Args:
                descr1 (float): descriptor calculated for the first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (array): 3x1 energy-force Eam kernel
            """
            return -k_ef_fun_d(np.zeros(3), descr1, conf2, sig, rc, r0)
        logger.info("Ended compilation of theano eam single species kernels")
        return k2_ee, k2_ef, k2_ff, k2_ee_d, k2_ef_d
class EamManySpeciesKernel(BaseEam):
    """Eam multi species kernel.
    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): cutoff radius
        theta[2] (float): radius in the descriptor's exponent
    """
    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='EamMultiSpecies', theta=theta, bounds=bounds)
        self.type = "multi"
    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for global energy and force learning
        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5 here called r1 and r2.
        Returns:
            k2_ee (func): energy-energy kernel
            k2_ef (func): energy-force kernel
            k2_ff (func): force-force kernel
            k2_ee_d (func): energy-energy kernel that takes a descriptor as first argument
            k2_ef_d (func): energy-force kernel that takes a descriptor as first argument
        """
        # Compile only if no cached compiled kernels are found on disk
        if not (os.path.exists(Mffpath / 'keam_ee_m.pickle') and
                os.path.exists(Mffpath / 'keam_ef_m.pickle') and os.path.exists(
                    Mffpath / 'keam_ff_m.pickle')
                and os.path.exists(Mffpath / 'keam_eed_m.pickle') and os.path.exists(Mffpath / 'keam_efd_m.pickle')):
            print("Building Kernels")
            import theano.tensor as T
            from theano import function, scan
            logger.info(
                "Started compilation of theano eam multi species kernels")
            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------
            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # lengthscale hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            rc = T.dscalar('rc')
            # Descriptor as a given input, used to map
            q1_descr = T.dscalar('q1_descr')
            # Element of the central atom if descriptor is Given
            alpha_1_descr = T.dscalar('alpha_1_descr')
            # Radius to use at denominator in the descriptor
            r0 = T.dscalar('r0')
            # positions of neighbours without chemical species (3D space assumed)
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]
            # species of the central atoms, read from the first neighbour row
            alpha_1 = rho1[0, 3]  # .flatten()
            alpha_2 = rho2[0, 3]  # .flatten()
            # numerical kronecker
            def delta_alpha(a1j, a2m):
                d = T.exp(-(a1j - a2m) ** 2 / (2 * 1e-5 ** 2))
                return d
            # matrices determining whether couples of atoms have the same atomic number
            delta_alpha_12 = delta_alpha(alpha_1, alpha_2)
            delta_alpha_12_descr = delta_alpha(alpha_1_descr, alpha_2)
            # distances of atoms wrt to the central one and wrt each other in 1 and 2
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            esp_term_1 = (r1j/r0 - 1)
            esp_term_2 = (r2m/r0 - 1)
            # smooth cosine cutoff, identically zero beyond rc
            cut_1 = 0.5*(1 + T.cos(np.pi*r1j/rc))*((T.sgn(rc-r1j) + 1) / 2)
            cut_2 = 0.5*(1 + T.cos(np.pi*r2m/rc))*((T.sgn(rc-r2m) + 1) / 2)
            # scalar descriptors of the two configurations
            q1 = T.sum(T.exp(-esp_term_1)*cut_1)
            q2 = T.sum(T.exp(-esp_term_2)*cut_2)
            # squared-exponential kernel, masked by the species kronecker
            k = T.exp(-(q1-q2)**2/(2*sig**2))*delta_alpha_12
            k_descr = T.exp(-(q1_descr-q2)**2/(2*sig**2))*delta_alpha_12_descr
            # energy energy kernel
            k_ee_fun = function([r1, r2, rho1, rho2, sig, rc, r0], k,
                                allow_input_downcast=False, on_unused_input='warn')
            # energy force kernel - Used to predict energies from forces
            k_ef = T.grad(k, r2)
            k_ef_fun = function([r1, r2, rho1, rho2, sig, rc, r0], k_ef,
                                allow_input_downcast=False, on_unused_input='warn')
            # force force kernel - it uses only local atom pairs to avoid useless computation
            k_ff = T.grad(k, r1)
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function([r1, r2, rho1, rho2, sig, rc, r0], k_ff_der,
                                allow_input_downcast=False, on_unused_input='warn')
            # energy energy descriptor kernel
            k_ee_fun_d = function([r2, q1_descr, rho2, sig, rc, r0, alpha_1_descr], k_descr,
                                  allow_input_downcast=False, on_unused_input='warn')
            # energy force descriptor kernel
            k_ef_descr = T.grad(k_descr, r2)
            k_ef_fun_d = function([r2, q1_descr, rho2, sig, rc, r0, alpha_1_descr], k_ef_descr,
                                  allow_input_downcast=False, on_unused_input='warn')
            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # Automaticallly stored compiled object from different processes
            with open(Mffpath / 'keam_ee_m.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'keam_ef_m.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'keam_ff_m.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)
            with open(Mffpath / 'keam_eed_m.pickle', 'wb') as f:
                pickle.dump(k_ee_fun_d, f)
            with open(Mffpath / 'keam_efd_m.pickle', 'wb') as f:
                pickle.dump(k_ef_fun_d, f)
        else:
            print("Loading Kernels")
            with open(Mffpath / "keam_ee_m.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "keam_ef_m.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "keam_ff_m.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)
            with open(Mffpath / 'keam_eed_m.pickle', 'rb') as f:
                k_ee_fun_d = pickle.load(f)
            with open(Mffpath / 'keam_efd_m.pickle', 'rb') as f:
                k_ef_fun_d = pickle.load(f)
        # --------------------------------------------------
        # WRAPPERS (we don't want to plug the position of the central element every time)
        # --------------------------------------------------
        def k2_ee(conf1, conf2, sig, rc, r0):
            """
            Eam kernel for global energy-energy correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (float): scalar valued energy-energy Eam kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, rc, r0)
        def k2_ef(conf1, conf2, sig, rc, r0):
            """
            Eam kernel for global energy-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (array): 3x1 energy-force Eam kernel
            """
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, rc, r0)
        def k2_ff(conf1, conf2, sig, rc, r0):
            """
            Eam kernel for force-force correlation
            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
            Returns:
                kernel (matrix): 3x3 force-force Eam kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, rc, r0)
        def k2_ee_d(descr1, conf2, sig, rc, r0, alpha_1_descr):
            """
            Eam kernel for global energy-energy correlation, with the first
            configuration replaced by its precomputed scalar descriptor
            Args:
                descr1 (float): descriptor calculated for the first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
                alpha_1_descr (int): element of the central atom
            Returns:
                kernel (float): scalar valued energy-energy Eam kernel
            """
            return k_ee_fun_d(np.zeros(3), descr1, conf2, sig, rc, r0, alpha_1_descr)
        def k2_ef_d(descr1, conf2, sig, rc, r0, alpha_1_descr):
            """
            Eam kernel for global energy-force correlation, with the first
            configuration replaced by its precomputed scalar descriptor
            Args:
                descr1 (float): descriptor calculated for the first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                rc (float): cutoff distance hyperparameter theta[1]
                r0 (float): descriptor radius hyperparameter theta[2]
                alpha_1_descr (int): element of the central atom
            Returns:
                kernel (array): 3x1 energy-force Eam kernel
            """
            return -k_ef_fun_d(np.zeros(3), descr1, conf2, sig, rc, r0, alpha_1_descr)
        logger.info("Ended compilation of theano eam multi species kernels")
        return k2_ee, k2_ef, k2_ff, k2_ee_d, k2_ef_d
| 42,667 | 40.7087 | 134 | py |
mff | mff-master/mff/kernels/__init__.py | from .eamkernel import EamManySpeciesKernel, EamSingleSpeciesKernel
from .manybodykernel import (ManyBodyManySpeciesKernel,
ManyBodySingleSpeciesKernel)
from .threebodykernel import (ThreeBodyManySpeciesKernel,
ThreeBodySingleSpeciesKernel)
from .twobodykernel import TwoBodyManySpeciesKernel, TwoBodySingleSpeciesKernel
# __all__ must contain attribute *names* (strings): with class objects listed,
# ``from mff.kernels import *`` raises ``TypeError: Item in __all__ must be str``.
__all__ = [
    "TwoBodySingleSpeciesKernel",
    "TwoBodyManySpeciesKernel",
    "ThreeBodySingleSpeciesKernel",
    "ThreeBodyManySpeciesKernel",
    "ManyBodySingleSpeciesKernel",
    "ManyBodyManySpeciesKernel",
    "EamSingleSpeciesKernel",
    "EamManySpeciesKernel",
]
| 684 | 41.8125 | 79 | py |
mff | mff-master/mff/kernels/manybodykernel.py | # -*- coding: utf-8 -*-
import logging
import os.path
import pickle
from abc import ABCMeta, abstractmethod
import numpy as np
from mff.kernels.base import Kernel, Mffpath
logger = logging.getLogger(__name__)
def dummy_calc_ff(data):
    """ Function used when multiprocessing.
    Args:
        data (list of objects): contains all the information required
            for the computation of the force-force kernel values
    Returns:
        result (array): len(array) x 3 x 3 array of computed kernel values
    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    if kertype == "single":
        kernel_file = Mffpath / "k3_ff_s.pickle"
    elif kertype == "multi":
        kernel_file = Mffpath / "k3_ff_m.pickle"
    else:
        # Fail loudly instead of hitting an UnboundLocalError on ``fun`` below
        raise ValueError("Unknown kernel type: %r" % (kertype,))
    with open(kernel_file, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3, 3))
    for i in range(len(array)):
        # fun takes the two central positions (at the origin), the two
        # configurations and the three hyperparameters
        result[i] = fun(np.zeros(3), np.zeros(3), array[i][0],
                        array[i][1], theta0, theta1, theta2)
    return result
def dummy_calc_ee(data):
    """ Function used when multiprocessing.
    Args:
        data (list of objects): contains all the information required
            for the computation of the energy-energy kernel values
    Returns:
        result (array): array of len(array) computed kernel values
    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    if kertype == "single":
        kernel_file = Mffpath / "k3_ee_s.pickle"
    elif kertype == "multi":
        kernel_file = Mffpath / "k3_ee_m.pickle"
    else:
        # Fail loudly instead of hitting an UnboundLocalError on ``fun`` below
        raise ValueError("Unknown kernel type: %r" % (kertype,))
    with open(kernel_file, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros(len(array))
    for i in range(len(array)):
        # Sum local contributions over all pairs of local configurations
        for conf1 in array[i][0]:
            for conf2 in array[i][1]:
                result[i] += fun(np.zeros(3), np.zeros(3),
                                 conf1, conf2, theta0, theta1, theta2)
    return result
def dummy_calc_ef(data):
    """ Function used when multiprocessing.
    Args:
        data (list of objects): contains all the information required
            for the computation of the energy-force kernel values
    Returns:
        result (array): len(array) x 3 array of computed kernel values
    Raises:
        ValueError: if the kernel type is neither "single" nor "multi"
    """
    array, theta0, theta1, theta2, kertype = data
    if kertype == "single":
        kernel_file = Mffpath / "k3_ef_s.pickle"
    elif kertype == "multi":
        kernel_file = Mffpath / "k3_ef_m.pickle"
    else:
        # Fail loudly instead of hitting an UnboundLocalError on ``fun`` below
        raise ValueError("Unknown kernel type: %r" % (kertype,))
    with open(kernel_file, 'rb') as f:
        fun = pickle.load(f)
    result = np.zeros((len(array), 3))
    for i in range(len(array)):
        conf2 = np.array(array[i][1], dtype='float')
        for conf1 in array[i][0]:
            conf1 = np.array(conf1, dtype='float')
            # Minus sign: forces are the negative gradient of the energy kernel
            result[i] += -fun(np.zeros(3), np.zeros(3), conf1,
                              conf2, theta0, theta1, theta2)
    return result
class BaseManyBody(Kernel, metaclass=ABCMeta):
""" Many body kernel class
Handles the functions common to the single-species and
multi-species three-body kernels.
Args:
kernel_name (str): To choose between single- and two-species kernel
theta[0] (float) : lengthscale of the kernel
theta[1] (float) : decay rate of the cutoff function
theta[2] (float) : cutoff radius
bounds (list) : bounds of the kernel function.
Attributes:
km_ee (object): Energy-energy kernel function
km_ef (object): Energy-force kernel function
km_ff (object): Force-force kernel function
"""
    @abstractmethod
    def __init__(self, kernel_name, theta, bounds):
        """Register the kernel name, store the hyperparameters and compile
        (or load from cache) the theano kernel functions."""
        super().__init__(kernel_name)
        self.theta = theta
        self.bounds = bounds
        # Compiled energy-energy, energy-force and force-force kernels
        self.km_ee, self.km_ef, self.km_ff = self.compile_theano()
    def calc(self, X1, X2, ncores=1):
        """
        Calculate the force-force kernel between two sets of configurations.
        Args:
            X1 (list): list of N1 Mx5 arrays containing xyz coordinates and atomic species
            X2 (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores used for multiprocessing (default 1)
        Returns:
            K (matrix): N1*3 x N2*3 matrix of the matrix-valued kernels
        """
        ker = np.zeros((len(X1) * 3, len(X2) * 3))
        if ncores > 1:
            # Build the list of all pairs whose kernel needs to be computed
            confs = []
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body force-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ff, clist)
            pool.close()
            pool.join()
            # Reassemble the per-core results into the full kernel matrix
            result = np.concatenate(result).reshape((n, 3, 3))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i * 3: i * 3 + 3, 3 * j:3 * j +
                        3] = result[(j + i * len(X2))]
        else:
            for i, conf1 in enumerate(X1):
                for j, conf2 in enumerate(X2):
                    ker[i * 3:i * 3 + 3, 3 * j:3 * j + 3] += self.km_ff(
                        conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_ef(self, X_glob, X, ncores=1, mapping = False):
        """
        Calculate the energy-force kernel between a set of global environments
        and a set of local configurations.
        Args:
            X_glob (list): list of N1 global environments, each a list of Mx5 arrays
                containing xyz coordinates and atomic species
            X (list): list of N2 Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): number of CPU cores used for multiprocessing (default 1)
            mapping (bool): unused here; kept for interface compatibility with
                the other kernel classes
        Returns:
            K (matrix): N1 x N2*3 matrix of the vector-valued kernels
        """
        ker = np.zeros((len(X_glob), len(X) * 3))
        if ncores > 1:
            # Build the list of all pairs whose kernel needs to be computed
            confs = []
            for x1 in X_glob:
                for x2 in X:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body energy-force kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ef, clist)
            pool.close()
            pool.join()
            # Reassemble the per-core results into the full kernel matrix
            result = np.vstack(np.asarray(result))
            for i in range(len(X_glob)):
                for j in range(len(X)):
                    ker[i, 3 * j:3 * j + 3] = result[(j + i * len(X))]
        else:
            for i, x1 in enumerate(X_glob):
                for j, conf2 in enumerate(X):
                    for conf1 in x1:
                        ker[i, 3 * j:3 * j + 3] += self.km_ef(
                            conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_ee(self, X1, X2, ncores=1, mapping = False):
        """
        Calculate the energy-energy kernel between two global environments.
        Args:
            X1 (list): list of N1 global environments of Mx5 arrays containing
                xyz coordinates and atomic species
            X2 (list): list of N2 global environments of Mx5 arrays
            ncores (int): number of CPU cores used for multiprocessing (default 1)
            mapping (bool): unused here; kept for interface compatibility with
                the other kernel classes
        Returns:
            K (matrix): N1 x N2 matrix of the scalar-valued kernels
        """
        if ncores > 1:  # Used for multiprocessing
            confs = []
            # Build a list of all input pairs which matrix needs to be computed
            for x1 in X1:
                for x2 in X2:
                    confs.append(np.asarray([x1, x2]))
            n = len(confs)
            import sys
            sys.setrecursionlimit(100000)
            logger.info(
                'Using %i cores for the 3-body energy-energy kernel calculation' % (ncores))
            # Way to split the kernels functions to compute evenly across the nodes
            splitind = np.zeros(ncores + 1)
            factor = (n + (ncores - 1)) / ncores
            splitind[1:-1] = [(i + 1) * factor for i in np.arange(ncores - 1)]
            splitind[-1] = n
            splitind = splitind.astype(int)
            clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                      self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
            import multiprocessing as mp
            pool = mp.Pool(ncores)
            result = pool.map(dummy_calc_ee, clist)
            pool.close()
            pool.join()
            result = np.concatenate(result).ravel()
            ker = np.zeros((len(X1), len(X2)))
            for i in range(len(X1)):
                for j in range(len(X2)):
                    ker[i, j] = result[j + i*len(X2)]
        else:
            # Serial path: sum local kernel contributions over both environments
            ker = np.zeros((len(X1), len(X2)))
            for i, x1 in enumerate(X1):
                for j, x2 in enumerate(X2):
                    for conf1 in x1:
                        for conf2 in x2:
                            ker[i, j] += self.km_ee(conf1, conf2,
                                                    self.theta[0], self.theta[1], self.theta[2])
        return ker
    def calc_gram(self, X, ncores=1, eval_gradient=False):
        """
        Calculate the force-force gram matrix for a set of configurations X.

        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N*3 x N*3 gram matrix of the matrix-valued kernels
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:
                # Enumerate only the lower triangle (j <= i): the gram matrix
                # is symmetric, so each pair is computed once.
                confs = []
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.asarray([X[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                logger.info(
                    'Using %i cores for the many-body force-force gram matrix calculation' % (ncores))
                import sys
                sys.setrecursionlimit(100000)  # unpickling compiled kernels can recurse deeply
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ff, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).reshape((n, 3, 3))
                off_diag = np.zeros((len(X) * 3, len(X) * 3))
                diag = np.zeros((len(X) * 3, len(X) * 3))
                # Unpack the flattened lower triangle: pair (i, j) sits at
                # index j + i*(i+1)//2 in `result`.
                for i in np.arange(len(X)):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i +
                         3] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 *
                                 j + 3] = result[j + i * (i + 1) // 2]
            else:
                # NOTE(review): this branch indexes X.shape[0], so it assumes X
                # is an array, while the parallel branch also works on lists —
                # confirm callers always pass an array when ncores == 1.
                diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                off_diag = np.zeros((X.shape[0] * 3, X.shape[0] * 3))
                for i in np.arange(X.shape[0]):
                    diag[3 * i:3 * i + 3, 3 * i:3 * i + 3] = \
                        self.km_ff(X[i], X[i], self.theta[0],
                                   self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        off_diag[3 * i:3 * i + 3, 3 * j:3 * j + 3] = \
                            self.km_ff(X[i], X[j], self.theta[0],
                                       self.theta[1], self.theta[2])
            # Symmetrize: diagonal blocks + strictly-lower blocks + their transpose
            gram = diag + off_diag + off_diag.T
        return gram
    def calc_gram_e(self, X, ncores=1, eval_gradient=False):  # Untested
        """
        Calculate the energy-energy gram matrix for a set of configurations X.

        Args:
            X (list): list of N Mx5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N x N gram matrix of the scalar-valued kernels
        """
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:
                confs = []
                # Build a list of all input pairs which matrix needs to be computed
                # (lower triangle only, j <= i, since the gram matrix is symmetric)
                for i in np.arange(len(X)):
                    for j in np.arange(i + 1):
                        thislist = np.array([list(X[i]), list(X[j])])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)  # unpickling compiled kernels can recurse deeply
                logger.info(
                    'Using %i cores for the many-body energy-energy gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ee, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                off_diag = np.zeros((len(X), len(X)))
                diag = np.zeros((len(X), len(X)))
                # Pair (i, j) of the flattened lower triangle sits at j + i*(i+1)//2
                for i in np.arange(len(X)):
                    diag[i, i] = result[i + i * (i + 1) // 2]
                    for j in np.arange(i):
                        off_diag[i, j] = result[j + i * (i + 1) // 2]
            else:
                diag = np.zeros((X.shape[0], X.shape[0]))
                off_diag = np.zeros((X.shape[0], X.shape[0]))
                for i in np.arange(X.shape[0]):
                    # Diagonal entry: sum over all pairs of local environments
                    # inside configuration i (each unordered pair counted twice)
                    for k, conf1 in enumerate(X[i]):
                        diag[i, i] += self.km_ee(conf1, conf1,
                                                 self.theta[0], self.theta[1], self.theta[2])
                        for conf2 in X[i][:k]:
                            # *2 here to speed up the loop
                            diag[i, i] += 2.0*self.km_ee(
                                conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
                    for j in np.arange(i):
                        for conf1 in X[i]:
                            for conf2 in X[j]:
                                off_diag[i, j] += self.km_ee(
                                    conf1, conf2, self.theta[0], self.theta[1], self.theta[2])
            gram = diag + off_diag + off_diag.T  # Gram matrix is symmetric
        return gram
    def calc_gram_ef(self, X, X_glob, ncores=1, eval_gradient=False):
        """
        Calculate the energy-force gram matrix for a set of configurations X.
        This returns a non-symmetric matrix which is equal to the transpose of
        the force-energy gram matrix.

        Args:
            X (list): list of N1 M1x5 arrays containing xyz coordinates and atomic species
            X_glob (list): list of N2 M2x5 arrays containing xyz coordinates and atomic species
            ncores (int): Number of CPU nodes to use for multiprocessing (default is 1)
            eval_gradient (bool): if True, evaluate the gradient of the gram matrix
        Returns:
            gram (matrix): N2 x N1*3 gram matrix of the vector-valued kernels
        """
        gram = np.zeros((X_glob.shape[0], X.shape[0] * 3))
        if eval_gradient:
            raise NotImplementedError('ERROR: GRADIENT NOT IMPLEMENTED YET')
        else:
            if ncores > 1:  # Multiprocessing
                # All (global, local) pairs are needed: this matrix is not symmetric
                confs = []
                for i in np.arange(len(X_glob)):
                    for j in np.arange(len(X)):
                        thislist = np.asarray([X_glob[i], X[j]])
                        confs.append(thislist)
                n = len(confs)
                import sys
                sys.setrecursionlimit(100000)  # unpickling compiled kernels can recurse deeply
                logger.info(
                    'Using %i cores for the many-body energy-force gram matrix calculation' % (ncores))
                # Way to split the kernels functions to compute evenly across the nodes
                splitind = np.zeros(ncores + 1)
                factor = (n + (ncores - 1)) / ncores
                splitind[1:-1] = [(i + 1) *
                                  factor for i in np.arange(ncores - 1)]
                splitind[-1] = n
                splitind = splitind.astype(int)
                clist = [[confs[splitind[i]:splitind[i + 1]], self.theta[0], self.theta[1], self.theta[2],
                          self.type] for i in np.arange(ncores)]  # Shape is ncores * (ntrain*(ntrain+1)/2)/ncores
                import multiprocessing as mp
                pool = mp.Pool(ncores)
                result = pool.map(dummy_calc_ef, clist)
                pool.close()
                pool.join()
                result = np.concatenate(result).ravel()
                # Each pair produced a 3-vector; pair (i, j) occupies the flat
                # slice starting at 3*(j + i*N1)
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        gram[i, 3 * j:3 * j + 3] = result[3 *
                                                          (j + i * X.shape[0]):3 + 3*(j + i * X.shape[0])]
            else:
                for i in np.arange(X_glob.shape[0]):
                    for j in np.arange(X.shape[0]):
                        # Sum the contributions of every local environment k of
                        # the global configuration i
                        for k in X_glob[i]:
                            gram[i, 3 * j:3 * j + 3] += self.km_ef(
                                k, X[j], self.theta[0], self.theta[1], self.theta[2])
        # NOTE(review): stored on the instance as a side effect — presumably for
        # later reuse; confirm whether any caller relies on self.gram_ef.
        self.gram_ef = gram
        return gram
def calc_diag(self, X):
diag = np.zeros((X.shape[0] * 3))
for i in np.arange(X.shape[0]):
diag[i * 3:(i + 1) * 3] = np.diag(self.km_ff(X[i], X[i],
self.theta[0], self.theta[1], self.theta[2]))
return diag
def calc_diag_e(self, X):
diag = np.zeros((X.shape[0]))
for i in np.arange(X.shape[0]):
diag[i] = self.km_ee(X[i], X[i], self.theta[0],
self.theta[1], self.theta[2])
return diag
    @staticmethod
    @abstractmethod
    def compile_theano():
        # Abstract hook: concrete subclasses compile and return the
        # (km_ee, km_ef, km_ff) kernel functions; the base class only
        # defines the interface and returns placeholders.
        return None, None, None
class ManyBodySingleSpeciesKernel(BaseManyBody):
    """Many body single species kernel.

    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """

    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='ManyBodySingleSpecies', theta=theta, bounds=bounds)
        # Selects the single-species pickled kernels ("*_s.pickle") in the workers
        self.type = "single"

    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for energy and force learning
        ker_jkmn_withcutoff = ker_jkmn #* cutoff_ikmn

        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5

        Returns:
            km_ee (func): energy-energy kernel
            km_ef (func): energy-force kernel
            km_ff (func): force-force kernel
        """
        # Compile only once: reuse the cached pickles when all three exist
        if not (os.path.exists(Mffpath / 'k3_ee_s.pickle') and
                os.path.exists(Mffpath / 'k3_ef_s.pickle') and os.path.exists(Mffpath / 'k3_ff_s.pickle')):
            print("Building Kernels")

            import theano.tensor as T
            from theano import function, scan
            logger.info("Started compilation of theano three body kernels")

            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------

            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            # NOTE(review): 'theta' is declared for interface parity but does not
            # appear in the symbolic graph (hence on_unused_input='ignore' below)
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')

            # positions of neighbours without chemical species
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]

            # --------------------------------------------------
            # RELATIVE DISTANCES TO CENTRAL VECTOR AND BETWEEN NEIGHBOURS
            # --------------------------------------------------

            # first and second configuration
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            rjk = T.sqrt(
                T.sum((rho1s[None, :, :] - rho1s[:, None, :]) ** 2, axis=2))
            rmn = T.sqrt(
                T.sum((rho2s[None, :, :] - rho2s[:, None, :]) ** 2, axis=2))

            # --------------------------------------------------
            # BUILD THE KERNEL
            # --------------------------------------------------

            # Squared exp of differences
            se_1j2m = T.exp(-(r1j[:, None] - r2m[None, :])
                            ** 2 / (2 * sig ** 2))
            se_jkmn = T.exp(-(rjk[:, :, None, None] -
                              rmn[None, None, :, :]) ** 2 / (2 * sig ** 2))
            se_jk2m = T.exp(-(rjk[:, :, None] -
                              r2m[None, None, :]) ** 2 / (2 * sig ** 2))
            se_1jmn = T.exp(-(r1j[:, None, None] -
                              rmn[None, :, :]) ** 2 / (2 * sig ** 2))

            # Kernel not summed (cyclic permutations)
            k1n = (se_1j2m[:, None, :, None] *
                   se_1j2m[None, :, None, :] * se_jkmn)
            k2n = (se_1jmn[:, None, :, :] * se_jk2m[:, :,
                                                    None, :] * se_1j2m[None, :, :, None])
            k3n = (se_1j2m[:, None, None, :] *
                   se_jk2m[:, :, :, None] * se_1jmn[None, :, :, :])

            # final shape is M1 M1 M2 M2
            ker = k1n + k2n + k3n

            # Cosine cutoff on every distance involved in a triplet
            cut_j = 0.5*(1+T.cos(np.pi*r1j/rc))
            cut_m = 0.5*(1+T.cos(np.pi*r2m/rc))
            cut_jk = cut_j[:,None]*cut_j[None,:]*0.5*(1+T.cos(np.pi*rjk/rc))
            cut_mn = cut_m[:,None]*cut_m[None,:]*0.5*(1+T.cos(np.pi*rmn/rc))

            # --------------------------------------------------
            # REMOVE DIAGONAL ELEMENTS AND ADD CUTOFF
            # --------------------------------------------------

            # remove diagonal elements AND lower triangular ones from first configuration
            mask_jk = T.triu(T.ones_like(rjk)) - T.identity_like(rjk)
            # remove diagonal elements from second configuration
            mask_mn = T.ones_like(rmn) - T.identity_like(rmn)
            # Combine masks
            mask_jkmn = mask_jk[:, :, None, None] * mask_mn[None, None, :, :]

            # Apply mask and then apply cutoff functions
            ker = ker * mask_jkmn
            ker = T.sum(ker * cut_jk[:, :, None, None]
                        * cut_mn[None, None, :, :])

            # Exponentiate the summed 3-body similarity (scaled by 1/1000) to
            # obtain the many-body kernel
            ker = T.exp(ker / 1000)

            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------

            # global energy energy kernel
            k_ee_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], ker, on_unused_input='ignore')

            # global energy force kernel
            k_ef = T.grad(ker, r2)
            k_ef_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ef, on_unused_input='ignore')

            # local force force kernel
            k_ff = T.grad(ker, r1)
            k_ff_der, updates = scan(lambda j, k_ff, r2: T.grad(k_ff[j], r2),
                                     sequences=T.arange(k_ff.shape[0]), non_sequences=[k_ff, r2])
            k_ff_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ff_der, on_unused_input='ignore')

            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # Automaticallly stored compiled object from different processes
            with open(Mffpath / 'k3_ee_s.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k3_ef_s.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k3_ff_s.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)

        else:
            print("Loading Kernels")
            with open(Mffpath / "k3_ee_s.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k3_ef_s.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k3_ff_s.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)

        # WRAPPERS (we don't want to plug the position of the central element every time)
        def km_ee(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for global energy-energy correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (float): scalar valued energy-energy many-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def km_ef(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for global energy-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (array): 3x1 energy-force many-body kernel
            """
            # Negated: the force is minus the gradient of the energy kernel
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def km_ff(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for local force-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (matrix): 3x3 force-force 3-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        logger.info("Ended compilation of theano three body kernels")

        return km_ee, km_ef, km_ff
class ManyBodyManySpeciesKernel(BaseManyBody):
    """Many body many species kernel.

    Args:
        theta[0] (float): lengthscale of the kernel
        theta[1] (float): decay rate of the cutoff function
        theta[2] (float): cutoff radius
    """

    def __init__(self, theta=(1., 1., 1.), bounds=((1e-2, 1e2), (1e-2, 1e2), (1e-2, 1e2))):
        super().__init__(kernel_name='ManyBodyManySpecies', theta=theta, bounds=bounds)
        # Selects the multi-species pickled kernels ("*_m.pickle") in the workers
        self.type = "multi"

    @staticmethod
    def compile_theano():
        """
        This function generates theano compiled kernels for energy and force learning
        ker_jkmn_withcutoff = ker_jkmn #* cutoff_ikmn

        The position of the atoms relative to the central one, and their chemical species
        are defined by a matrix of dimension Mx5

        Returns:
            km_ee (func): energy-energy kernel
            km_ef (func): energy-force kernel
            km_ff (func): force-force kernel
        """
        # Compile only once: reuse the cached pickles when all three exist
        if not (os.path.exists(Mffpath / 'k3_ee_m.pickle') and
                os.path.exists(Mffpath / 'k3_ef_m.pickle') and os.path.exists(Mffpath / 'k3_ff_m.pickle')):
            print("Building Kernels")

            import theano.tensor as T
            from theano import function, scan
            logger.info("Started compilation of theano three body kernels")

            # --------------------------------------------------
            # INITIAL DEFINITIONS
            # --------------------------------------------------

            # positions of central atoms
            r1, r2 = T.dvectors('r1d', 'r2d')
            # positions of neighbours
            rho1, rho2 = T.dmatrices('rho1', 'rho2')
            # hyperparameter
            sig = T.dscalar('sig')
            # cutoff hyperparameters
            # NOTE(review): 'theta' is declared for interface parity but does not
            # appear in the symbolic graph (hence on_unused_input='ignore' below)
            theta = T.dscalar('theta')
            rc = T.dscalar('rc')

            # positions of neighbours without chemical species
            rho1s = rho1[:, 0:3]
            rho2s = rho2[:, 0:3]

            # Chemical species: column 3 is the central atom's species, column 4
            # the neighbour's species
            alpha_1 = rho1[:, 3].flatten()
            alpha_2 = rho2[:, 3].flatten()
            alpha_j = rho1[:, 4].flatten()
            alpha_m = rho2[:, 4].flatten()
            alpha_k = rho1[:, 4].flatten()
            alpha_n = rho2[:, 4].flatten()

            # --------------------------------------------------
            # RELATIVE DISTANCES TO CENTRAL VECTOR AND BETWEEN NEIGHBOURS
            # --------------------------------------------------

            # first and second configuration
            r1j = T.sqrt(T.sum((rho1s[:, :] - r1[None, :]) ** 2, axis=1))
            r2m = T.sqrt(T.sum((rho2s[:, :] - r2[None, :]) ** 2, axis=1))
            rjk = T.sqrt(
                T.sum((rho1s[None, :, :] - rho1s[:, None, :]) ** 2, axis=2))
            rmn = T.sqrt(
                T.sum((rho2s[None, :, :] - rho2s[:, None, :]) ** 2, axis=2))

            # --------------------------------------------------
            # CHEMICAL SPECIES MASK
            # --------------------------------------------------

            # numerical kronecker: ~1 when the two species labels match, ~0 otherwise
            def delta_alpha2(a1j, a2m):
                d = np.exp(-(a1j - a2m) ** 2 / (2 * 0.00001 ** 2))
                return d

            # permutation 1
            delta_alphas12 = delta_alpha2(alpha_1[0], alpha_2[0])
            delta_alphasjm = delta_alpha2(alpha_j[:, None], alpha_m[None, :])
            delta_alphas_jmkn = delta_alphasjm[:, None,
                                               :, None] * delta_alphasjm[None, :, None, :]
            delta_perm1 = delta_alphas12 * delta_alphas_jmkn

            # permutation 3
            delta_alphas1m = delta_alpha2(
                alpha_1[0, None], alpha_m[None, :]).flatten()
            delta_alphasjn = delta_alpha2(alpha_j[:, None], alpha_n[None, :])
            delta_alphask2 = delta_alpha2(
                alpha_k[:, None], alpha_2[None, 0]).flatten()
            delta_perm3 = delta_alphas1m[None, None, :, None] * delta_alphasjn[:, None, None, :] * \
                delta_alphask2[None, :, None, None]

            # permutation 5
            delta_alphas1n = delta_alpha2(
                alpha_1[0, None], alpha_n[None, :]).flatten()
            delta_alphasj2 = delta_alpha2(
                alpha_j[:, None], alpha_2[None, 0]).flatten()
            delta_alphaskm = delta_alpha2(alpha_k[:, None], alpha_m[None, :])
            delta_perm5 = delta_alphas1n[None, None, None, :] * delta_alphaskm[None, :, :, None] * \
                delta_alphasj2[:, None, None, None]

            # --------------------------------------------------
            # BUILD THE KERNEL
            # --------------------------------------------------

            # Squared exp of differences
            se_1j2m = T.exp(-(r1j[:, None] - r2m[None, :])
                            ** 2 / (2 * sig ** 2))
            se_jkmn = T.exp(-(rjk[:, :, None, None] -
                              rmn[None, None, :, :]) ** 2 / (2 * sig ** 2))
            se_jk2m = T.exp(-(rjk[:, :, None] -
                              r2m[None, None, :]) ** 2 / (2 * sig ** 2))
            se_1jmn = T.exp(-(r1j[:, None, None] -
                              rmn[None, :, :]) ** 2 / (2 * sig ** 2))

            # Kernel not summed (cyclic permutations)
            k1n = (se_1j2m[:, None, :, None] *
                   se_1j2m[None, :, None, :] * se_jkmn)
            k2n = (se_1jmn[:, None, :, :] * se_jk2m[:, :,
                                                    None, :] * se_1j2m[None, :, :, None])
            k3n = (se_1j2m[:, None, None, :] *
                   se_jk2m[:, :, :, None] * se_1jmn[None, :, :, :])

            # final shape is M1 M1 M2 M2
            # Each permutation term is gated by its species-matching mask
            ker_loc = k1n * delta_perm1 + k2n * delta_perm3 + k3n * delta_perm5

            # Faster version of cutoff (less calculations)
            cut_j = 0.5*(1+T.cos(np.pi*r1j/rc))
            cut_m = 0.5*(1+T.cos(np.pi*r2m/rc))
            cut_jk = cut_j[:,None]*cut_j[None,:]*0.5*(1+T.cos(np.pi*rjk/rc))
            cut_mn = cut_m[:,None]*cut_m[None,:]*0.5*(1+T.cos(np.pi*rmn/rc))

            # --------------------------------------------------
            # REMOVE DIAGONAL ELEMENTS
            # --------------------------------------------------

            # remove diagonal elements AND lower triangular ones from first configuration
            mask_jk = T.triu(T.ones_like(rjk)) - T.identity_like(rjk)
            # remove diagonal elements from second configuration
            mask_mn = T.ones_like(rmn) - T.identity_like(rmn)
            # Combine masks
            mask_jkmn = mask_jk[:, :, None, None] * mask_mn[None, None, :, :]

            # Apply mask and then apply cutoff functions
            ker_loc = ker_loc * mask_jkmn
            ker_loc = T.sum(
                ker_loc * cut_jk[:, :, None, None] * cut_mn[None, None, :, :])

            # Exponentiate the summed 3-body similarity (scaled by 1/20) to
            # obtain the many-body kernel
            ker_loc = T.exp(ker_loc / 20)

            # --------------------------------------------------
            # FINAL FUNCTIONS
            # --------------------------------------------------

            # energy energy kernel
            k_ee_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], ker_loc, on_unused_input='ignore')

            # energy force kernel
            k_ef_cut = T.grad(ker_loc, r2)
            k_ef_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ef_cut, on_unused_input='ignore')

            # force force kernel
            k_ff_cut = T.grad(ker_loc, r1)
            k_ff_cut_der, updates = scan(lambda j, k_ff_cut, r2: T.grad(k_ff_cut[j], r2),
                                         sequences=T.arange(k_ff_cut.shape[0]), non_sequences=[k_ff_cut, r2])
            k_ff_fun = function(
                [r1, r2, rho1, rho2, sig, theta, rc], k_ff_cut_der, on_unused_input='ignore')

            # Save the function that we want to use for multiprocessing
            # This is necessary because theano is a crybaby and does not want to access the
            # Automaticallly stored compiled object from different processes
            with open(Mffpath / 'k3_ee_m.pickle', 'wb') as f:
                pickle.dump(k_ee_fun, f)
            with open(Mffpath / 'k3_ef_m.pickle', 'wb') as f:
                pickle.dump(k_ef_fun, f)
            with open(Mffpath / 'k3_ff_m.pickle', 'wb') as f:
                pickle.dump(k_ff_fun, f)

        else:
            print("Loading Kernels")
            with open(Mffpath / "k3_ee_m.pickle", 'rb') as f:
                k_ee_fun = pickle.load(f)
            with open(Mffpath / "k3_ef_m.pickle", 'rb') as f:
                k_ef_fun = pickle.load(f)
            with open(Mffpath / "k3_ff_m.pickle", 'rb') as f:
                k_ff_fun = pickle.load(f)

        # WRAPPERS (we don't want to plug the position of the central element every time)
        def km_ee(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for energy-energy correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (float): scalar valued energy-energy many-body kernel
            """
            return k_ee_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def km_ef(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for energy-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (array): 3x1 energy-force many-body kernel
            """
            # Negated: the force is minus the gradient of the energy kernel
            return -k_ef_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        def km_ff(conf1, conf2, sig, theta, rc):
            """
            Many body kernel for force-force correlation

            Args:
                conf1 (array): first configuration.
                conf2 (array): second configuration.
                sig (float): lengthscale hyperparameter theta[0]
                theta (float): cutoff decay rate hyperparameter theta[1]
                rc (float): cutoff distance hyperparameter theta[2]

            Returns:
                kernel (matrix): 3x3 force-force many-body kernel
            """
            return k_ff_fun(np.zeros(3), np.zeros(3), conf1, conf2, sig, theta, rc)

        logger.info("Ended compilation of theano many body kernels")

        return km_ee, km_ef, km_ff
| 40,153 | 38.994024 | 114 | py |
T-Concord3D | T-Concord3D-master/test.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import os
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from tqdm import tqdm
import math
from utils.metric_util import per_class_iu, fast_hist_crop, fast_ups_crop
from dataloader.pc_dataset import get_label_name, get_label_inv_name, update_config
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
import torch.nn.functional as F
from utils.load_save_util import load_checkpoint
from utils.ups import enable_dropout
import warnings
from torch.nn.parallel import DistributedDataParallel
warnings.filterwarnings("ignore")
def save_predictions_sematicKitti(predict_labels_serialized, predict_prob_serialized, path_to_seq_folder,
                                  path_to_seq_folder_prob, sample_name, challenge=False):
    """Dump one SemanticKITTI sample's predicted labels (and, outside challenge
    mode, their probabilities) as raw-binary `.label` files.

    Args:
        predict_labels_serialized (np.ndarray): flat per-point label array.
        predict_prob_serialized (np.ndarray): flat per-point probability array.
        path_to_seq_folder (str): output folder for the label file.
        path_to_seq_folder_prob (str): output folder for the probability file.
        sample_name (str): sample identifier used as the file stem.
        challenge (bool): if True, skip writing probabilities (benchmark submission).
    """
    # Ensure the label folder exists too (the original only created the prob
    # folder); exist_ok avoids the exists()/makedirs race.
    os.makedirs(path_to_seq_folder, exist_ok=True)
    predict_labels_serialized.tofile(os.path.join(path_to_seq_folder, sample_name + '.label'))
    if not challenge:
        os.makedirs(path_to_seq_folder_prob, exist_ok=True)
        predict_prob_serialized.tofile(os.path.join(path_to_seq_folder_prob, sample_name + '.label'))
def save_predictions_wod(predict_labels_serialized, predict_prob_serialized, path_to_seq_folder,
                         path_to_seq_folder_prob, sample_name, challenge=False):
    """Dump one WOD sample's predicted labels (and, outside challenge mode,
    their probabilities) as `.npy` files.

    Args:
        predict_labels_serialized (np.ndarray): flat per-point label array.
        predict_prob_serialized (np.ndarray): flat per-point probability array.
        path_to_seq_folder (str): output folder for the label file.
        path_to_seq_folder_prob (str): output folder for the probability file.
        sample_name (str): sample identifier used as the file stem (np.save appends '.npy').
        challenge (bool): if True, skip writing probabilities (benchmark submission).
    """
    # Ensure the label folder exists too (the original only created the prob
    # folder); exist_ok avoids the exists()/makedirs race.
    os.makedirs(path_to_seq_folder, exist_ok=True)
    np.save(os.path.join(path_to_seq_folder, sample_name), predict_labels_serialized)
    if not challenge:
        os.makedirs(path_to_seq_folder_prob, exist_ok=True)
        np.save(os.path.join(path_to_seq_folder_prob, sample_name), predict_prob_serialized)
def main(args):
os.environ['OMP_NUM_THREADS'] = "1"
distributed = False
if "WORLD_SIZE" in os.environ:
distributed = int(os.environ["WORLD_SIZE"]) > 1
print(f"distributed: {distributed}")
pytorch_device = args.local_rank
if distributed:
torch.cuda.set_device(pytorch_device)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
config_path = args.config_path
configs = load_config_data(config_path)
if args.mode == 'infer' or args.mode == 'val' or args.mode == 'test':
configs['train_params']['ssl'] = False
# send config parameters to pc_dataset
update_config(configs)
dataset_config = configs['dataset_params']
dataset_type = 'SemanticKITTI' if 'SemKITTI_sk_multiscan' == dataset_config['pc_dataset_type'] else 'WOD'
train_dataloader_config = configs['train_data_loader']
ssl_dataloader_config = configs['ssl_data_loader']
val_dataloader_config = configs['val_data_loader']
test_dataloader_config = configs['test_data_loader']
val_batch_size = val_dataloader_config['batch_size']
train_batch_size = train_dataloader_config['batch_size']
ssl_batch_size = ssl_dataloader_config['batch_size']
test_batch_size = test_dataloader_config['batch_size']
model_config = configs['model_params']
train_hypers = configs['train_params']
past_frame = train_hypers['past']
future_frame = train_hypers['future']
T_past_frame = train_hypers['T_past']
T_future_frame = train_hypers['T_future']
grid_size = model_config['output_shape']
num_class = model_config['num_class']
ignore_label = dataset_config['ignore_label']
model_load_path = train_hypers['model_load_path']
model_save_path = train_hypers['model_save_path']
SemKITTI_label_name = get_label_name(dataset_config["label_mapping"])
unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]
print(unique_label_str)
SemKITTI_learningmap_inv = get_label_inv_name(dataset_config["label_mapping"])
model = model_builder.build(model_config).to(pytorch_device)
print(f"model_load_path: {model_load_path}")
if os.path.exists(model_load_path):
model = load_checkpoint(model_load_path, model, map_location=pytorch_device)
print(f" loading model_load_path: {model_load_path}")
# if args.mgpus:
# my_model = nn.DataParallel(my_model)
# #my_model.cuda()
# #my_model.cuda()
if distributed:
model = DistributedDataParallel(
model,
device_ids=[pytorch_device],
output_device=args.local_rank,
find_unused_parameters=True
)
optimizer = optim.Adam(model.parameters(), lr=train_hypers["learning_rate"])
loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
num_class=num_class, ignore_label=ignore_label)
train_dataset_loader, val_dataset_loader, test_dataset_loader, ssl_dataset_loader = data_builder.build(
dataset_config,
train_dataloader_config,
val_dataloader_config,
test_dataloader_config,
ssl_dataloader_config,
grid_size=grid_size,
train_hypers=train_hypers)
# test and validation
if args.mode == 'val':
dataset_loader = val_dataset_loader
batch_size = val_batch_size
path_to_save_predicted_labels = val_dataloader_config['data_path'] # "val_result"
elif args.mode == 'test':
dataset_loader = test_dataset_loader
batch_size = test_batch_size
path_to_save_predicted_labels = test_dataloader_config['data_path'] # "test_result"
elif args.mode == 'infer':
dataset_loader = ssl_dataset_loader
batch_size = ssl_batch_size
path_to_save_predicted_labels = ssl_dataloader_config['data_path'] # "pseudo_label_result"
# mode to eval
model.eval()
# if uncertainty is used, enable dropout
if args.ups:
# enable dropout (mc)
enable_dropout(model)
# sample forward pass
f_pass = 10
with torch.no_grad():
ups_hist = []
hist_list = []
hist_list_op = []
ups_count = []
def validation_inference(vox_label, grid, pt_labs, pt_fea, ref_st_idx=None, ref_end_idx=None, lcw=None):
val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in pt_fea]
val_grid_ten = [torch.from_numpy(i).to(pytorch_device) for i in grid]
if args.ups:
ups_out_prob = []
for _ in range(f_pass):
predict_labels_raw = model(val_pt_fea_ten, val_grid_ten, batch_size)
ups_out_prob.append(F.softmax(predict_labels_raw, dim=1)) # for selecting positive pseudo-labels
ups_out_prob = torch.stack(ups_out_prob)
out_std = torch.std(ups_out_prob, dim=0)
predict_probablity = torch.mean(ups_out_prob, dim=0)
predict_labels = torch.argmax(predict_probablity, dim=1)
# keep dimension during finding maximum
predict_prob_max, predict_prob_ind = torch.max(predict_probablity, dim=1, keepdim=True)
# squeeze (remove the 1 size form the tensor)
predict_prob_max = torch.squeeze(predict_prob_max)
# get the uncertainty of the most probable prediction
max_std = out_std.gather(1, predict_prob_ind)
# squeeze (remove the 1 size form the tensor)
max_std = torch.squeeze(max_std)
else:
predict_labels_raw = model(val_pt_fea_ten, val_grid_ten, batch_size)
predict_labels = torch.argmax(predict_labels_raw, dim=1)
predict_probablity = torch.nn.functional.softmax(predict_labels_raw, dim=1)
predict_prob_max, predict_prob_ind = predict_probablity.max(dim=1)
# move to cpu and detach to convert to numpy
predict_labels = predict_labels.cpu().detach().numpy()
predict_probabilitys = predict_prob_max.cpu().detach().numpy()
if args.ups:
model_uncertintys = max_std.cpu().detach().numpy()
for count, i_val_grid in enumerate(grid):
if args.save_raw:
predict_raw = predict_labels_raw[count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
predict_label = predict_labels[count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
predict_prob = predict_probabilitys[count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
if args.ups:
model_uncertainty = model_uncertintys[
count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
model_uncertainty_serialized = np.array(model_uncertainty, dtype=np.float32)
predict_labels_serialized = np.array(predict_label, dtype=np.int32)
predict_prob_serialized = np.array(predict_prob, dtype=np.float32)
if args.save_raw:
predict_raw_serialized = np.array(predict_raw, dtype=np.float32)
demo_pt_labs = pt_labs[count]
# get reference frame start and end index
st_id, end_id = int(ref_st_idx[count]), int(ref_end_idx[count])
# only select the reference frame points
if ref_st_idx is not None:
predict_labels_serialized = predict_labels_serialized[st_id:st_id + end_id]
predict_prob_serialized = predict_prob_serialized[st_id:st_id + end_id]
if args.save_raw:
predict_raw_serialized = predict_raw_serialized[st_id:st_id + end_id]
demo_pt_labs = demo_pt_labs[st_id:st_id + end_id]
if args.ups:
model_uncertainty_serialized = model_uncertainty_serialized[st_id:st_id + end_id]
if args.mode == 'val':
hist_list.append(fast_hist_crop(predict_labels_serialized, demo_pt_labs,
unique_label))
if args.ups:
tmp_hist, temp_count = fast_ups_crop(model_uncertainty_serialized, demo_pt_labs.flatten(),
unique_label)
ups_hist.append(tmp_hist)
ups_count.append(temp_count)
if args.save:
# convert the prediction into corresponding GT labels (inverse mapping)
# for index, label in enumerate(predict_labels_serialized):
# predict_labels_serialized[index] = SemKITTI_learningmap_inv[label]
# print(predict_labels_serialized.size)
predict_labels_serialized = np.vectorize(SemKITTI_learningmap_inv.__getitem__)(predict_labels_serialized)
# get frame and sequence name
sample_name = dataset_loader.dataset.point_cloud_dataset.im_idx[i_iter_val * batch_size + count][
-10:-4]
sequence_num = dataset_loader.dataset.point_cloud_dataset.im_idx[i_iter_val * batch_size + count].split('/')[-3]
# create destination path to save predictions
# path_to_seq_folder = path_to_save_predicted_labels + '/' + str(sequence_num)
path_to_seq_folder = os.path.join(path_to_save_predicted_labels, str(sequence_num),
f"predictions_f{T_past_frame}_{T_future_frame}")
path_to_seq_folder_prob = os.path.join(path_to_save_predicted_labels, str(sequence_num),
f"probability_f{T_past_frame}_{T_future_frame}")
if args.save_raw:
path_to_seq_folder_raw = os.path.join(path_to_save_predicted_labels, str(sequence_num),
f"raw_f{T_past_frame}_{T_future_frame}")
if args.challenge:
path_to_save_test_predicted_labels = args.challenge_path
path_to_seq_folder = os.path.join(path_to_save_test_predicted_labels,
f"f{T_past_frame}_{T_future_frame}", "sequences",
str(sequence_num),
"predictions")
if not os.path.exists(path_to_seq_folder):
os.makedirs(path_to_seq_folder)
# dump predictions and probability
predict_labels_serialized.tofile(path_to_seq_folder + '/' + sample_name + '.label')
if dataset_type == 'SemanticKITTI':
save_predictions_sematicKitti(predict_labels_serialized, predict_prob_serialized,
path_to_seq_folder, path_to_seq_folder_prob, sample_name, challenge=args.challenge)
elif dataset_type == 'WOD':
save_predictions_wod(predict_labels_serialized, predict_prob_serialized,
path_to_seq_folder, path_to_seq_folder_prob, sample_name, challenge=args.challenge)
else:
raise Exception(f'{dataset_type} dataset type not known')
# if not args.challenge:
# if not os.path.exists(path_to_seq_folder_prob):
# os.makedirs(path_to_seq_folder_prob)
# predict_prob_serialized.tofile(path_to_seq_folder_prob + '/' + sample_name + '.label')
# if args.save_raw:
# if not os.path.exists(path_to_seq_folder_raw):
# os.makedirs(path_to_seq_folder_raw)
#
# predict_prob_serialized.tofile(path_to_seq_folder_raw + '/' + sample_name + '.label')
# Validation with multi-frames and ssl:
# if past_frame > 0 and train_hypers['ssl']:
for i_iter_val, (_, vox_label, grid, pt_labs, pt_fea, ref_st_idx, ref_end_idx, lcw) in tqdm(
enumerate(dataset_loader),
total=math.ceil(len(dataset_loader.dataset.point_cloud_dataset.im_idx) / batch_size)):
# call the validation and inference with
validation_inference(vox_label, grid, pt_labs, pt_fea, ref_st_idx=ref_st_idx, ref_end_idx=ref_end_idx,
lcw=lcw)
# print the validation per class iou and overall miou
if args.mode == 'val':
iou = per_class_iu(sum(hist_list))
print('Validation per class iou: ')
for class_name, class_iou in zip(unique_label_str, iou):
print('%s : %.2f%%' % (class_name, class_iou * 100))
val_miou = np.nanmean(iou) * 100
print('Current val miou is %.3f' % val_miou)
if args.ups:
uncertainty_hist = np.sum(ups_hist, axis=0) / np.sum(ups_count, axis=0)
plt.bar(range(20), uncertainty_hist, width=0.4)
plt.show()
print(uncertainty_hist)
if __name__ == '__main__':
    # Training settings
    # CLI entry point for validation / test / pseudo-label inference.
    parser = argparse.ArgumentParser(description='')
    # YAML config describing dataset, model and dataloader parameters.
    parser.add_argument('-y', '--config_path',
                        default='config/semantickitti/semantickitti_S0_0_T11_33_ssl_s20_p80.yaml')
    parser.add_argument('-g', '--mgpus', action='store_true', default=False)
    # One of 'val' | 'test' | 'infer' (selects dataloader and output dir in main()).
    parser.add_argument('-m', '--mode', default='val')
    # NOTE(review): the flags below use default=True/False without a type or
    # action, so any value passed on the command line arrives as a non-empty
    # string and is therefore truthy (e.g. "--save False" still enables
    # saving).  They behave as intended only when left at their defaults —
    # consider action='store_true' if CLI toggling is needed.
    parser.add_argument('-s', '--save', default=True)
    parser.add_argument('-c', '--challenge', default=False)
    parser.add_argument('-p', '--challenge_path', default='/mnt/personal/gebreawe/Datasets/RealWorld/semantic-kitti'
                                                          '/challenge')
    # Enable MC-dropout uncertainty estimation (UPS) during inference.
    parser.add_argument('-u', '--ups', default=False)
    # Set by torch.distributed launchers; selects the CUDA device.
    parser.add_argument("--local_rank", default=0, type=int)
    parser.add_argument('-r', '--save_raw', default=False)
    args = parser.parse_args()
    print(' '.join(sys.argv))
    print(args)
    main(args)
| 16,515 | 44.750693 | 137 | py |
T-Concord3D | T-Concord3D-master/concordance_kitti.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import os
import time
import argparse
import sys
import numpy as np
import glob
import os
import shutil
import random
import math
# https://github.com/ctu-vras/T-Concord3D.git
def main(args):
    """Fuse per-point predictions of two or three teacher models into
    concordance pseudo-labels.

    For every point, the label of the most confident teacher wins; its
    probability is then boosted by ``args.lamda`` for each additional teacher
    that agrees with the winning label, clipped to 1.0.  Fused labels and
    probabilities are written as SemanticKITTI-style ``.label`` files under
    ``args.destination``.

    Args:
        args: parsed CLI namespace with attributes ``teacher1``, ``teacher2``,
            ``teacher3`` (may be None for two-teacher fusion), ``lamda``,
            ``concordance`` (output folder suffix), ``source`` and
            ``destination`` (sequence root directories).
    """
    teacher_1 = args.teacher1
    teacher_2 = args.teacher2
    teacher_3 = args.teacher3  # None -> fuse only two teachers
    lamda = args.lamda
    concordance = args.concordance
    source = args.source
    destination = args.destination
    # SemanticKITTI training sequences (08 is held out for validation).
    sequence = ["00", "01", "02", "03", "04", "05", "06", "07", "09", "10"]
    for sq in sequence:
        pred_t1 = sorted(glob.glob(os.path.join(source, sq, f"predictions_{teacher_1}", '*.label')))
        pred_t2 = sorted(glob.glob(os.path.join(source, sq, f"predictions_{teacher_2}", '*.label')))
        probs_t1 = sorted(glob.glob(os.path.join(source, sq, f"probability_{teacher_1}", '*.label')))
        probs_t2 = sorted(glob.glob(os.path.join(source, sq, f"probability_{teacher_2}", '*.label')))
        if teacher_3 is not None:
            pred_t3 = sorted(glob.glob(os.path.join(source, sq, f"predictions_{teacher_3}", '*.label')))
            probs_t3 = sorted(glob.glob(os.path.join(source, sq, f"probability_{teacher_3}", '*.label')))
        frame_len = len(pred_t1)
        for frame in range(frame_len):
            frame_name = str(frame).zfill(6)
            # Stack per-teacher labels (int32) and confidences (float32);
            # resulting shape is (num_teachers, num_points, 1).
            if teacher_3 is not None:
                pred = np.array([np.fromfile(pred_t1[frame], dtype=np.int32).reshape((-1, 1)),
                                 np.fromfile(pred_t2[frame], dtype=np.int32).reshape((-1, 1)),
                                 np.fromfile(pred_t3[frame], dtype=np.int32).reshape((-1, 1))])
                prob = np.array([np.fromfile(probs_t1[frame], dtype=np.float32).reshape((-1, 1)),
                                 np.fromfile(probs_t2[frame], dtype=np.float32).reshape((-1, 1)),
                                 np.fromfile(probs_t3[frame], dtype=np.float32).reshape((-1, 1))])
            else:
                pred = np.array([np.fromfile(pred_t1[frame], dtype=np.int32).reshape((-1, 1)),
                                 np.fromfile(pred_t2[frame], dtype=np.int32).reshape((-1, 1))])
                prob = np.array([np.fromfile(probs_t1[frame], dtype=np.float32).reshape((-1, 1)),
                                 np.fromfile(probs_t2[frame], dtype=np.float32).reshape((-1, 1))])
            max_pob = prob.max(axis=0)
            max_pob_id = prob.argmax(axis=0)
            # Pick, per point, the label of the most confident teacher.
            best_pred = np.zeros_like(pred[0])
            for j in range(len(max_pob)):
                best_pred[j] = pred[int(max_pob_id[j]), j]
            # Count how many teachers agree with the winning label.
            # Bug fix: iterate over the actual number of teachers instead of a
            # hard-coded range(3), which raised IndexError when teacher_3 was
            # None (pred then holds only two teacher arrays).
            weight = np.zeros_like(best_pred)
            for k in range(len(pred)):
                concord = best_pred == pred[k]
                weight += concord.astype(int)
            # Boost confidence by lamda per extra agreeing teacher; cap at 1.0.
            best_prob = max_pob
            new_prob = best_prob + ((weight - 1) * lamda)
            new_prob = np.minimum(np.ones_like(best_prob), new_prob)
            new_prob = new_prob.astype(np.float32)
            # exist_ok=True also repairs the case where only one of the two
            # output directories already exists (the original only created
            # the probability dir when the predictions dir was missing).
            os.makedirs(os.path.join(destination, sq, f"predictions_{concordance}"), exist_ok=True)
            os.makedirs(os.path.join(destination, sq, f"probability_{concordance}"), exist_ok=True)
            best_pred.tofile(os.path.join(destination, sq, f"predictions_{concordance}", frame_name + '.label'))
            new_prob.tofile(os.path.join(destination, sq, f"probability_{concordance}", frame_name + '.label'))
if __name__ == '__main__':
    # CLI for fusing teacher predictions into concordance pseudo-labels.
    parser = argparse.ArgumentParser()
    # Confidence boost added per additional agreeing teacher.
    parser.add_argument('-l', '--lamda', default=0.1, type=float)
    # NOTE(review): '--best' is parsed but never read by main().
    parser.add_argument('-b', '--best', default=True, )
    # Folder-name suffixes identifying each teacher's prediction dumps.
    parser.add_argument('-x', '--teacher1', required=False, default="f1_1")
    parser.add_argument('-y', '--teacher2', required=False, default="f2_2")
    # NOTE(review): main() treats teacher3 is None as "two teachers only",
    # but argparse cannot produce None from the command line — only the
    # default can be edited to None in code.
    parser.add_argument('-z', '--teacher3', default="f3_3")
    # Suffix for the fused output folders (predictions_<c> / probability_<c>).
    parser.add_argument('-c', '--concordance', default="11_33")
    parser.add_argument('-s', '--source', default='/mnt/beegfs/gpu/argoverse-tracking-all-training/semantic-kitti'
                                                  '/train_pseudo_20/sequences')
    parser.add_argument('-d', '--destination', default='/mnt/beegfs/gpu/argoverse-tracking-all-training/semantic'
                                                       '-kitti/train_pseudo_20/sequences')
    args = parser.parse_args()
    main(args)
| 4,467 | 44.131313 | 114 | py |
T-Concord3D | T-Concord3D-master/train_tconcord3d.py | # -*- coding:utf-8 -*-
# author: Awet
import argparse
import os
import sys
import time
import warnings
import numpy as np
import torch
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
from dataloader.pc_dataset import get_label_name, update_config
from utils.load_save_util import load_checkpoint
from utils.metric_util import per_class_iu, fast_hist_crop
from utils.trainer_function import Trainer
import copy
# Silence library deprecation chatter during long training runs.
warnings.filterwarnings("ignore")
# clear/empty cached memory used by caching allocator
torch.cuda.empty_cache()
# NOTE(review): the summary string is discarded here — this call has no
# visible effect unless its return value is printed/logged.
torch.cuda.memory_summary(device=None, abbreviated=False)
# training
# Module-level training state, mutated via `global` inside main()/helpers.
epoch = 0
best_val_miou = 0
global_iter = 0
def main(args):
    """Entry point: configure (optionally distributed) training and run it.

    Loads the YAML config given by ``args.config_path``, builds the model,
    losses and data loaders, then delegates the train/eval loop to
    ``utils.trainer_function.Trainer``.

    Args:
        args: parsed CLI namespace with ``config_path``, ``mgpus`` and
            ``local_rank`` (set by torch.distributed launchers).
    """
    # Avoid CPU oversubscription when every DDP worker spawns OpenMP threads.
    os.environ['OMP_NUM_THREADS'] = "1"
    # torchrun / torch.distributed.launch export WORLD_SIZE for multi-GPU runs.
    distributed = False
    if "WORLD_SIZE" in os.environ:
        distributed = int(os.environ["WORLD_SIZE"]) > 1
    print(f"distributed: {distributed}")
    pytorch_device = args.local_rank
    if distributed:
        torch.cuda.set_device(pytorch_device)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    configs = load_config_data(args.config_path)
    # Propagate config values to the dataset module (module-level side effect).
    update_config(configs)

    dataset_config = configs['dataset_params']
    train_dataloader_config = configs['train_data_loader']
    val_dataloader_config = configs['val_data_loader']
    ssl_dataloader_config = configs['ssl_data_loader']
    source_val_batch_size = val_dataloader_config['batch_size']
    source_train_batch_size = train_dataloader_config['batch_size']

    model_config = configs['model_params']
    train_hypers = configs['train_params']
    # Semi-supervised flag: losses are then weighted by per-point label
    # confidence (lcw) inside loss_builder.
    ssl = train_hypers['ssl']

    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']

    # Bug fix: the original assigned model_load_path to `model_path` and then
    # immediately overwrote it with model_save_path, so a pretrained
    # checkpoint was only loaded if it happened to live at the *save*
    # location.  Keep the two paths separate.
    model_load_path = train_hypers['model_load_path']
    model_save_path = train_hypers['model_save_path']

    SemKITTI_label_name = get_label_name(dataset_config["label_mapping"])
    # NB: no ignored class — drop the first (ignore) id, shift to 0-based ids.
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]

    model = model_builder.build(model_config).to(pytorch_device)
    if os.path.exists(model_load_path):
        model = load_checkpoint(model_load_path, model, map_location=pytorch_device)

    if distributed:
        model = DistributedDataParallel(
            model,
            device_ids=[pytorch_device],
            output_device=args.local_rank,
            find_unused_parameters=False  # True
        )

    # NOTE(review): loss_builder.build expects class weights or None for
    # `weights`; passing False is only harmless because fl=False here.
    if ssl:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=False, ssl=True, fl=False)
    else:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=False, fl=False)

    source_train_dataset_loader, source_val_dataset_loader, _, target_train_dataset_loader = data_builder.build(
        dataset_config,
        train_dataloader_config,
        val_dataloader_config,
        ssl_dataloader_config=ssl_dataloader_config,
        grid_size=grid_size,
        train_hypers=train_hypers)

    optimizer = optim.Adam(model.parameters(), lr=train_hypers["learning_rate"])

    global global_iter, best_val_miou, epoch
    print("|-------------------------Training started-----------------------------------------|")
    # Define training mode and function; checkpoints go to the save path.
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        ckpt_dir=model_save_path,
        unique_label=unique_label,
        unique_label_str=unique_label_str,
        lovasz_softmax=lovasz_softmax,
        loss_func=loss_func,
        ignore_label=ignore_label,
        train_mode="ema",
        ssl=ssl,
        eval_frequency=1,
        pytorch_device=pytorch_device,
        warmup_epoch=5,
        ema_frequency=1)
    trainer.fit(train_hypers["max_num_epochs"],
                source_train_dataset_loader,
                source_train_batch_size,
                source_val_dataset_loader,
                source_val_batch_size,
                test_loader=None,
                ckpt_save_interval=1,
                lr_scheduler_each_iter=False)
if __name__ == '__main__':
    # Training settings
    # CLI entry point for (optionally distributed) training.
    parser = argparse.ArgumentParser(description='')
    # YAML config describing dataset, model, dataloaders and hyperparameters.
    parser.add_argument('-y', '--config_path',
                        default='config/semantickitti/nuscenes_T3_3.yaml')
    parser.add_argument('-g', '--mgpus', action='store_true', default=False)
    # Set by torch.distributed launchers; selects the CUDA device.
    parser.add_argument("--local_rank", default=0, type=int)
    args = parser.parse_args()
    print(' '.join(sys.argv))
    print(args)
main(args) | 6,366 | 33.79235 | 112 | py |
T-Concord3D | T-Concord3D-master/train.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import os
import time
import argparse
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from utils.metric_util import per_class_iu, fast_hist_crop
from dataloader.pc_dataset import get_label_name, update_config
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
from utils.load_save_util import load_checkpoint
import warnings
from torch.nn.parallel import DistributedDataParallel
warnings.filterwarnings("ignore")
# training
# Module-level training state; mutated via `global` declarations inside
# main() and its nested helpers.
epoch = 0
best_val_miou = 0
global_iter = 0
def main(args):
    """Fully/semi-supervised training loop for the cylindrical segmentation model.

    Loads the YAML config from ``args.config_path``, builds model, losses and
    dataloaders, then alternates per-epoch validation (from epoch 1 on) and
    training, checkpointing whenever validation mIoU improves.  Uses the
    module-level globals ``epoch``, ``best_val_miou`` and ``global_iter``.
    """
    # Avoid CPU oversubscription when every DDP worker spawns OpenMP threads.
    os.environ['OMP_NUM_THREADS'] = "1"
    # torchrun / torch.distributed.launch export WORLD_SIZE for multi-GPU runs.
    distributed = False
    if "WORLD_SIZE" in os.environ:
        distributed = int(os.environ["WORLD_SIZE"]) > 1
    print(f"distributed: {distributed}")
    pytorch_device = args.local_rank
    if distributed:
        torch.cuda.set_device(pytorch_device)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    config_path = args.config_path
    configs = load_config_data(config_path)
    # send config parameters to pc_dataset
    update_config(configs)
    dataset_config = configs['dataset_params']
    train_dataloader_config = configs['train_data_loader']
    val_dataloader_config = configs['val_data_loader']
    ssl_dataloader_config = configs['ssl_data_loader']
    val_batch_size = val_dataloader_config['batch_size']
    train_batch_size = train_dataloader_config['batch_size']
    model_config = configs['model_params']
    train_hypers = configs['train_params']
    # Temporal context sizes (past/future frames aggregated per sample).
    past_frame = train_hypers['past']
    future_frame = train_hypers['future']
    # Semi-supervised flag: losses then weighted by per-point confidence (lcw).
    ssl = train_hypers['ssl']
    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']
    model_load_path = train_hypers['model_load_path']
    model_save_path = train_hypers['model_save_path']
    SemKITTI_label_name = get_label_name(dataset_config["label_mapping"])
    # Drop the ignore class and shift to 0-based training ids.
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]
    my_model = model_builder.build(model_config).to(pytorch_device)
    if os.path.exists(model_load_path):
        my_model = load_checkpoint(model_load_path, my_model, map_location=pytorch_device)
    # if args.mgpus:
    #     my_model = nn.DataParallel(my_model)
    #     #my_model.cuda()
    # #my_model.cuda()
    if distributed:
        my_model = DistributedDataParallel(
            my_model,
            device_ids=[pytorch_device],
            output_device=args.local_rank,
            find_unused_parameters=True
        )
    # for weighted class loss
    weighted_class = False
    # for focal loss
    focal_loss = False  # True
    # 20 class number of samples from training sample
    # Inverse-frequency class weights (only used when focal/weighted is on).
    class_weights = np.array([1.40014903e+00, 1.10968683e+00, 5.06321920e+02, 9.19710291e+01,
                              1.76627589e+01, 1.58902791e+01, 1.49002594e+02, 6.12058299e+02,
                              1.75137027e+03, 2.47504075e-01, 3.25237847e+00, 3.62211985e-01,
                              8.77872638e+00, 4.08248861e-01, 9.97997655e-01, 1.91585640e-01,
                              7.21493239e+00, 4.68076958e-01, 1.69628483e+01, 6.35032127e+01], dtype=np.float32)
    per_class_weight = None
    if focal_loss or weighted_class:
        per_class_weight = torch.from_numpy(class_weights).to(pytorch_device)
    optimizer = optim.Adam(my_model.parameters(), lr=train_hypers["learning_rate"])
    if ssl:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=per_class_weight, ssl=True, fl=focal_loss)
    else:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=per_class_weight, fl=focal_loss)
    train_dataset_loader, val_dataset_loader, _, _ = data_builder.build(dataset_config,
                                                                        train_dataloader_config,
                                                                        val_dataloader_config,
                                                                        ssl_dataloader_config=ssl_dataloader_config,
                                                                        grid_size=grid_size,
                                                                        train_hypers=train_hypers)
    # NOTE(review): class_count is never read after this point.
    class_count = np.zeros(20)
    my_model.train()
    # global_iter = 0
    check_iter = train_hypers['eval_every_n_steps']
    global global_iter, best_val_miou, epoch
    print("|-------------------------Training started-----------------------------------------|")
    print(f"focal_loss:{focal_loss}, weighted_cross_entropy: {weighted_class}")
    while epoch < train_hypers['max_num_epochs']:
        print(f"epoch: {epoch}")
        loss_list = []
        pbar = tqdm(total=len(train_dataset_loader))
        time.sleep(5)
        # lr_scheduler.step(epoch)

        def valideting(hist_list, val_loss_list, val_vox_label, val_grid, val_pt_labs, val_pt_fea, ref_st_idx=None,
                       ref_end_idx=None, lcw=None):
            """Run one validation batch: accumulate confusion hists and losses.

            Appends per-sample confusion matrices to ``hist_list`` and the
            batch loss to ``val_loss_list``; returns both (closure over
            ``my_model``, ``val_batch_size``, ``ssl`` etc. from main()).
            """
            val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in val_pt_fea]
            val_grid_ten = [torch.from_numpy(i).to(pytorch_device) for i in val_grid]
            val_label_tensor = val_vox_label.type(torch.LongTensor).to(pytorch_device)
            predict_labels = my_model(val_pt_fea_ten, val_grid_ten, val_batch_size)
            # aux_loss = loss_fun(aux_outputs, point_label_tensor)
            inp = val_label_tensor.size(0)
            # TODO: check if this is correctly implemented
            # hack for batch_size mismatch with the number of training example
            predict_labels = predict_labels[:inp, :, :, :, :]
            if ssl:
                # lcw: per-point label confidence weights for pseudo-labels.
                lcw_tensor = torch.FloatTensor(lcw).to(pytorch_device)
                loss = lovasz_softmax(torch.nn.functional.softmax(predict_labels).detach(), val_label_tensor,
                                      ignore=ignore_label, lcw=lcw_tensor) + loss_func(predict_labels.detach(),
                                                                                       val_label_tensor, lcw=lcw_tensor)
            else:
                loss = lovasz_softmax(torch.nn.functional.softmax(predict_labels).detach(), val_label_tensor,
                                      ignore=ignore_label) + loss_func(predict_labels.detach(), val_label_tensor)
            predict_labels = torch.argmax(predict_labels, dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            # Gather per-point predictions at each point's voxel coordinate.
            for count, i_val_grid in enumerate(val_grid):
                hist_list.append(fast_hist_crop(predict_labels[
                                                    count, val_grid[count][:, 0], val_grid[count][:, 1],
                                                    val_grid[count][:, 2]], val_pt_labs[count],
                                                unique_label))
            val_loss_list.append(loss.detach().cpu().numpy())
            return hist_list, val_loss_list

        # if global_iter % check_iter == 0 and epoch >= 1:
        # Validate once per epoch, starting after the first training epoch.
        if epoch >= 1:
            my_model.eval()
            hist_list = []
            val_loss_list = []
            with torch.no_grad():
                # Validation with multi-frames and ssl:
                # if past_frame > 0 and train_hypers['ssl']:
                for i_iter_val, (_, vox_label, grid, pt_labs, pt_fea, ref_st_idx, ref_end_idx, val_lcw) \
                        in enumerate(val_dataset_loader):
                    # call the validation and inference with
                    hist_list, val_loss_list = valideting(hist_list, val_loss_list, vox_label, grid, pt_labs,
                                                          pt_fea, ref_st_idx=ref_st_idx,
                                                          ref_end_idx=ref_end_idx,
                                                          lcw=val_lcw)
            print(f"--------------- epoch: {epoch} ----------------")
            iou = per_class_iu(sum(hist_list))
            print('Validation per class iou: ')
            for class_name, class_iou in zip(unique_label_str, iou):
                print('%s : %.2f%%' % (class_name, class_iou * 100))
            val_miou = np.nanmean(iou) * 100
            # del val_vox_label, val_grid, val_pt_fea
            # save model if performance is improved
            if best_val_miou < val_miou:
                best_val_miou = val_miou
                # NOTE(review): assumes model_save_path has the form
                # "<dir>/<file>"; split('/')[-2] is the directory name.
                if not os.path.exists(model_save_path.split('/')[-2]):
                    os.mkdir(os.path.join(model_save_path.split('/')[-2]))
                torch.save(my_model.state_dict(), model_save_path)
            print(f"Current val miou is {np.round(val_miou, 2)} while the best val miou is "
                  f"{np.round(best_val_miou, 2)}")
            print(f"Current val loss is {np.round(np.mean(val_loss_list), 2)}")

        def training(i_iter_train, train_vox_label, train_grid, pt_labels, train_pt_fea, ref_st_idx=None,
                     ref_end_idx=None, lcw=None):
            """Run one optimization step on a training batch.

            Closure over ``my_model``, ``optimizer``, ``loss_list`` and
            ``pbar`` from main(); increments the module-global
            ``global_iter`` and logs the running loss periodically.
            """
            global global_iter, best_val_miou, epoch
            train_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in train_pt_fea]
            train_vox_ten = [torch.from_numpy(i).to(pytorch_device) for i in train_grid]
            point_label_tensor = train_vox_label.type(torch.LongTensor).to(pytorch_device)

            # forward + backward + optimize
            outputs = my_model(train_pt_fea_ten, train_vox_ten, train_batch_size)
            inp = point_label_tensor.size(0)
            # print(f"outputs.size() : {outputs.size()}")
            # TODO: check if this is correctly implemented
            # hack for batch_size mismatch with the number of training example
            outputs = outputs[:inp, :, :, :, :]
            ################################
            if ssl:
                lcw_tensor = torch.FloatTensor(lcw).to(pytorch_device)
                loss = lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor, ignore=ignore_label,
                                      lcw=lcw_tensor) + loss_func(
                    outputs, point_label_tensor, lcw=lcw_tensor)
            else:
                loss = lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor,
                                      ignore=ignore_label) + loss_func(
                    outputs, point_label_tensor)
            # TODO: check --> to mitigate only one element tensors can be converted to Python scalars
            loss = loss.mean()
            loss.backward()
            optimizer.step()
            loss_list.append(loss.item())

            if global_iter % 1000 == 0:
                if len(loss_list) > 0:
                    print('epoch %d iter %5d, loss: %.3f\n' %
                          (epoch, i_iter_train, np.mean(loss_list)))
                else:
                    print('loss error')

            optimizer.zero_grad()
            global_iter += 1
            if global_iter % 100 == 0:
                pbar.update(100)
            if global_iter % check_iter == 0:
                if len(loss_list) > 0:
                    print('epoch %d iter %5d, loss: %.3f\n' %
                          (epoch, i_iter_train, np.mean(loss_list)))
                else:
                    print('loss error')

        my_model.train()
        # training with multi-frames and ssl:
        # if past_frame > 0 and train_hypers['ssl']:
        for i_iter_train, (_, vox_label, grid, pt_labs, pt_fea, ref_st_idx, ref_end_idx, lcw) in enumerate(
                train_dataset_loader):
            # call the validation and inference with
            training(i_iter_train, vox_label, grid, pt_labs, pt_fea, ref_st_idx=ref_st_idx, ref_end_idx=ref_end_idx,
                     lcw=lcw)
        pbar.close()
        epoch += 1
epoch += 1
if __name__ == '__main__':
    # Training settings
    # CLI entry point for (optionally distributed) supervised/SSL training.
    parser = argparse.ArgumentParser(description='')
    # YAML config describing dataset, model, dataloaders and hyperparameters.
    parser.add_argument('-y', '--config_path',
                        default='config/semantickitti/nuscenes_S0_0_T11_33_ssl_s20_p80.yaml')
    parser.add_argument('-g', '--mgpus', action='store_true', default=False)
    # Set by torch.distributed launchers; selects the CUDA device.
    parser.add_argument("--local_rank", default=0, type=int)
    args = parser.parse_args()
    print(' '.join(sys.argv))
    print(args)
    main(args)
| 13,060 | 42.105611 | 120 | py |
T-Concord3D | T-Concord3D-master/tools/rename_kitti_train_pseudo.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import os
import time
import argparse
import sys
import numpy as np
import numpy as np
import glob
from multiprocessing import Pool
import os
import shutil
import random
import math
def main(source='/mnt/beegfs/gpu/argoverse-tracking-all-training/semantic-kitti/train_pseudo_40/sequences',
         destination='/mnt/beegfs/gpu/argoverse-tracking-all-training/semantic-kitti/train_pseudo_40/sequences',
         sequences=None):
    """Renumber scan/label files of each sequence to a dense 0-based range.

    For every sequence folder, sorts the ``velodyne/*.bin`` scans and renames
    each scan (and its matching ``labels/*.label`` file) to a zero-padded
    sequential index: the i-th sorted scan becomes ``%06d.bin`` / ``%06d.label``.

    The previously hard-coded paths and sequence list are now defaulted
    parameters, so ``main()`` with no arguments behaves exactly as before.

    Args:
        source: root directory containing the ``<seq>/velodyne`` folders.
        destination: kept for parity with the (commented-out) copy logic of
            the original script; not used by the rename itself.
        sequences: iterable of sequence folder names; defaults to the
            SemanticKITTI training split.
    """
    if sequences is None:
        # SemanticKITTI training sequences (08 is held out for validation).
        sequences = ["00", "01", "02", "03", "04", "05", "06", "07", "09", "10"]
    for sq in sequences:
        scan_files = sorted(glob.glob(os.path.join(source, sq, "velodyne", '*.bin')))
        for new_idx, scan_path in enumerate(scan_files):
            new_stem = str(new_idx).zfill(6)
            # Original 6-character stem, e.g. ".../000123.bin" -> "000123".
            old_stem = scan_path[-10:-4]
            os.rename(os.path.join(source, sq, "velodyne", old_stem + '.bin'),
                      os.path.join(source, sq, "velodyne", new_stem + '.bin'))
            # NOTE(review): assumes a matching label file exists for every
            # scan — a missing label raises FileNotFoundError mid-sequence.
            os.rename(os.path.join(source, sq, "labels", old_stem + '.label'),
                      os.path.join(source, sq, "labels", new_stem + '.label'))
if __name__ == '__main__':
    # Rename all sequences in-place, then report completion.
    main()
    print(f"------------------------------Task finished-------------------------")
| 1,739 | 33.8 | 140 | py |
T-Concord3D | T-Concord3D-master/config/config.py | # -*- coding:utf-8 -*-
from pathlib import Path
from strictyaml import Bool, Float, Int, Map, Seq, Str, as_document, load
# strictyaml validation schemas, one Map per top-level section of the YAML
# training configs.  load_config_data() below picks the whole-file schema by
# the config's "format_version" field (only v4 is currently supported).
model_params = Map(
    {
        "model_architecture": Str(),
        "output_shape": Seq(Int()),
        "fea_dim": Int(),
        "out_fea_dim": Int(),
        "num_class": Int(),
        "num_input_features": Int(),
        "use_norm": Bool(),
        "init_size": Int(),
    }
)

dataset_params = Map(
    {
        "dataset_type": Str(),
        "pc_dataset_type": Str(),
        "ignore_label": Int(),
        "return_test": Bool(),
        "fixed_volume_space": Bool(),
        "label_mapping": Str(),
        "max_volume_space": Seq(Float()),
        "min_volume_space": Seq(Float()),
    }
)

# The four dataloader sections share the same layout (path, split, batching).
train_data_loader = Map(
    {
        "data_path": Str(),
        "imageset": Str(),
        "return_ref": Bool(),
        "batch_size": Int(),
        "shuffle": Bool(),
        "num_workers": Int(),
    }
)

val_data_loader = Map(
    {
        "data_path": Str(),
        "imageset": Str(),
        "return_ref": Bool(),
        "batch_size": Int(),
        "shuffle": Bool(),
        "num_workers": Int(),
    }
)

test_data_loader = Map(
    {
        "data_path": Str(),
        "imageset": Str(),
        "return_ref": Bool(),
        "batch_size": Int(),
        "shuffle": Bool(),
        "num_workers": Int(),
    }
)

ssl_data_loader = Map(
    {
        "data_path": Str(),
        "imageset": Str(),
        "return_ref": Bool(),
        "batch_size": Int(),
        "shuffle": Bool(),
        "num_workers": Int(),
    }
)

# Optimizer, checkpointing and temporal-context hyperparameters.
train_params = Map(
    {
        "model_load_path": Str(),
        "model_save_path": Str(),
        "checkpoint_every_n_steps": Int(),
        "max_num_epochs": Int(),
        "eval_every_n_steps": Int(),
        "learning_rate": Float(),
        "past": Int(),
        "future": Int(),
        "T_past": Str(),
        "T_future": Str(),
        "ssl": Bool(),
        "rgb": Bool(),
    }
)

# Whole-file schema for format_version 4.
schema_v4 = Map(
    {
        "format_version": Int(),
        "model_params": model_params,
        "dataset_params": dataset_params,
        "train_data_loader": train_data_loader,
        "val_data_loader": val_data_loader,
        "test_data_loader": test_data_loader,
        "ssl_data_loader": ssl_data_loader,
        "train_params": train_params,
    }
)

# Registry mapping format_version -> schema; extend here for new versions.
SCHEMA_FORMAT_VERSION_TO_SCHEMA = {4: schema_v4}
def load_config_data(path: str) -> dict:
    """Read the YAML file at *path* and validate it against its own schema.

    The file is first parsed schema-less to discover its ``format_version``,
    then re-parsed with the matching strict schema and returned as a plain
    ``dict``.  Raises for unknown format versions.
    """
    raw_yaml = Path(path).read_text()
    untyped_cfg = load(raw_yaml, schema=None)
    version = int(untyped_cfg["format_version"])
    schema = SCHEMA_FORMAT_VERSION_TO_SCHEMA.get(version)
    if schema is None:
        raise Exception(f"Unsupported schema format version: {version}.")
    validated = load(raw_yaml, schema=schema)
    return validated.data
def config_data_to_config(data):  # type: ignore
    # Wrap a plain dict back into a validated strictyaml document using the
    # (only supported) v4 schema; inverse of load_config_data().
    return as_document(data, schema_v4)
def save_config_data(data: dict, path: str) -> None:
    """Validate *data* against schema v4 and write it to *path* as YAML."""
    document = config_data_to_config(data)
    with open(Path(path), "w") as out_file:
        out_file.write(document.as_yaml())
| 3,134 | 22.931298 | 90 | py |
T-Concord3D | T-Concord3D-master/config/__init__.py | # -*- coding:utf-8 -*-
| 23 | 11 | 22 | py |
T-Concord3D | T-Concord3D-master/builder/loss_builder.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import torch
from utils.lovasz_losses import lovasz_softmax, lovasz_softmax_lcw, cross_entropy_lcw
from utils.loss_func import FocalLoss
def build(wce=True, lovasz=True, num_class=20, ignore_label=None, weights=None, ssl=False, fl=False):
    """Build the training loss(es) for the segmentation model.

    Args:
        wce: include a point-wise (cross-entropy or focal) loss.
        lovasz: include the lovasz-softmax loss.
        num_class: number of semantic classes (kept for API compatibility).
        ignore_label: label id excluded from the loss.
        weights: per-class weight tensor for the focal loss (or None).
        ssl: semi-supervised mode — use the lcw (label-confidence-weighted)
            loss variants that accept per-point pseudo-label confidences.
        fl: use focal loss instead of plain cross-entropy.

    Returns:
        ``(point_wise_loss, lovasz_loss)`` when both ``wce`` and ``lovasz``
        are requested, otherwise the single requested loss callable.

    Raises:
        NotImplementedError: if neither ``wce`` nor ``lovasz`` is requested.
    """
    # focal loss and semisupervised learning
    if ssl and fl:
        if wce and lovasz:
            return FocalLoss(weight=weights, ignore_index=ignore_label), lovasz_softmax_lcw
        elif wce and not lovasz:
            # Bug fix: previously returned the bool `wce` instead of a loss.
            return FocalLoss(weight=weights, ignore_index=ignore_label)
        elif not wce and lovasz:
            return lovasz_softmax_lcw
    # only semi-supervised learning
    if ssl:
        if wce and lovasz:
            return cross_entropy_lcw, lovasz_softmax_lcw
        elif wce and not lovasz:
            # Bug fix: previously returned the bool `wce` instead of a loss.
            return cross_entropy_lcw
        elif not wce and lovasz:
            return lovasz_softmax_lcw
    # Fully supervised: focal loss on GT, or plain cross-entropy.
    if fl:
        loss_funs = FocalLoss(weight=weights, ignore_index=ignore_label)
    else:
        loss_funs = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)
    if wce and lovasz:
        return loss_funs, lovasz_softmax
    elif wce and not lovasz:
        # Bug fix: previously returned the bool `wce` instead of a loss.
        return loss_funs
    elif not wce and lovasz:
        return lovasz_softmax
    else:
        raise NotImplementedError
| 1,319 | 30.428571 | 101 | py |
T-Concord3D | T-Concord3D-master/builder/model_builder.py | # -*- coding:utf-8 -*-
from model.cylinder_3d import get_model_class
from model.segment_3d import Asymm_3d_spconv
from model.cylinder_feature import cylinder_fea
def build(model_config):
    """Assemble the cylindrical segmentation network described by `model_config`.

    Builds the point-feature extractor and the sparse-conv segmentation
    backbone, then wires them into the registered model architecture.
    """
    grid_shape = model_config['output_shape']
    backbone = Asymm_3d_spconv(
        output_shape=grid_shape,
        use_norm=model_config['use_norm'],
        num_input_features=model_config['num_input_features'],
        init_size=model_config['init_size'],
        nclasses=model_config['num_class'])
    point_feature_net = cylinder_fea(
        grid_size=grid_shape,
        fea_dim=model_config['fea_dim'],
        out_pt_fea_dim=model_config['out_fea_dim'],
        fea_compre=model_config['num_input_features'])
    architecture = get_model_class(model_config["model_architecture"])
    return architecture(
        cylin_model=point_feature_net,
        segmentator_spconv=backbone,
        sparse_shape=grid_shape)
| 1,147 | 30.888889 | 64 | py |
T-Concord3D | T-Concord3D-master/builder/data_builder.py | # -*- coding:utf-8 -*-
import torch
from dataloader.dataset_semantickitti import get_model_class, collate_fn_BEV, collate_fn_BEV_tta
from dataloader.pc_dataset import get_pc_model_class
def build(dataset_config,
          train_dataloader_config,
          val_dataloader_config,
          test_dataloader_config=None,
          ssl_dataloader_config=None,
          grid_size=[480, 360, 32], use_tta=False, train_hypers=None):
    """Build train / val / (optional) test / (optional) ssl dataloaders.

    Returns a 4-tuple (train_loader, val_loader, test_loader, ssl_loader);
    test_loader and ssl_loader are None when their configs are not given.
    NOTE: grid_size uses a mutable default list — benign here because it is
    only read and forwarded, never mutated.
    """
    # Pull per-split paths/flags out of the dataloader configs.
    train_data_path = train_dataloader_config["data_path"]
    train_imageset = train_dataloader_config["imageset"]
    val_data_path = val_dataloader_config["data_path"]
    val_imageset = val_dataloader_config["imageset"]
    train_ref = train_dataloader_config["return_ref"]
    val_ref = val_dataloader_config["return_ref"]
    if test_dataloader_config is not None:
        test_data_path = test_dataloader_config["data_path"]
        test_imageset = test_dataloader_config["imageset"]
        test_ref = test_dataloader_config["return_ref"]
    # ssl data path for Semi-Supervised training
    ssl_data_path = None
    if ssl_dataloader_config is not None:
        ssl_data_path = ssl_dataloader_config["data_path"]
        ssl_imageset = ssl_dataloader_config["imageset"]
        ssl_ref = ssl_dataloader_config["return_ref"]
    label_mapping = dataset_config["label_mapping"]
    # Point-cloud dataset class (e.g. SemanticKITTI/nuScenes reader).
    SemKITTI = get_pc_model_class(dataset_config['pc_dataset_type'])
    nusc = None
    if "nusc" in dataset_config['pc_dataset_type']:
        from nuscenes import NuScenes
        # NOTE(review): nusc is instantiated but never passed on below — the
        # dataset class presumably re-opens it itself; confirm.
        nusc = NuScenes(version='v1.0-trainval', dataroot=train_data_path, verbose=True)
    # if we want to train in SSL mode
    if train_hypers and ssl_dataloader_config and train_hypers['ssl']:
        # SSL training: the train dataset also sees the pseudo-label path.
        train_pt_dataset = SemKITTI(train_data_path, imageset=train_imageset,
                                    return_ref=train_ref, label_mapping=label_mapping,
                                    train_hypers=train_hypers, ssl_data_path=ssl_data_path)
    else:
        train_pt_dataset = SemKITTI(train_data_path, imageset=train_imageset,
                                    return_ref=train_ref, label_mapping=label_mapping,
                                    train_hypers=train_hypers, ssl_data_path=None)
    val_pt_dataset = SemKITTI(val_data_path, imageset=val_imageset,
                              return_ref=val_ref, label_mapping=label_mapping, train_hypers=train_hypers)
    if test_dataloader_config is not None:
        test_pt_dataset = SemKITTI(test_data_path, imageset=test_imageset,
                                   return_ref=test_ref, label_mapping=label_mapping, train_hypers=train_hypers)
    if ssl_dataloader_config is not None:
        ssl_pt_dataset = SemKITTI(ssl_data_path, imageset=ssl_imageset,
                                  return_ref=ssl_ref, label_mapping=label_mapping, train_hypers=train_hypers)
    # Voxelized (cylindrical-grid) wrapper datasets. Training always uses
    # rotate/scale/transform/flip augmentation.
    train_dataset = get_model_class(dataset_config['dataset_type'])(
        train_pt_dataset,
        grid_size=grid_size,
        flip_aug=True,
        fixed_volume_space=dataset_config['fixed_volume_space'],
        max_volume_space=dataset_config['max_volume_space'],
        min_volume_space=dataset_config['min_volume_space'],
        ignore_label=dataset_config["ignore_label"],
        rotate_aug=True,
        scale_aug=True,
        transform_aug=True
    )
    if use_tta:
        # Test-time augmentation: augmented copies + the TTA collate function.
        val_dataset = get_model_class(dataset_config['dataset_type'])(
            val_pt_dataset,
            grid_size=grid_size,
            fixed_volume_space=dataset_config['fixed_volume_space'],
            max_volume_space=dataset_config['max_volume_space'],
            min_volume_space=dataset_config['min_volume_space'],
            ignore_label=dataset_config["ignore_label"],
            rotate_aug=True,
            scale_aug=True,
            return_test=True,
            use_tta=True
        )
        collate_fn_BEV_tmp = collate_fn_BEV_tta
    else:
        val_dataset = get_model_class(dataset_config['dataset_type'])(
            val_pt_dataset,
            grid_size=grid_size,
            fixed_volume_space=dataset_config['fixed_volume_space'],
            max_volume_space=dataset_config['max_volume_space'],
            min_volume_space=dataset_config['min_volume_space'],
            ignore_label=dataset_config["ignore_label"],
        )
        collate_fn_BEV_tmp = collate_fn_BEV
    if use_tta:
        if test_dataloader_config is not None:
            test_dataset = get_model_class(dataset_config['dataset_type'])(
                test_pt_dataset,
                grid_size=grid_size,
                fixed_volume_space=dataset_config['fixed_volume_space'],
                max_volume_space=dataset_config['max_volume_space'],
                min_volume_space=dataset_config['min_volume_space'],
                ignore_label=dataset_config["ignore_label"],
                rotate_aug=True,
                scale_aug=True,
                return_test=True,
                use_tta=True
            )
            # Reassigns the same value chosen above; kept for symmetry.
            collate_fn_BEV_tmp = collate_fn_BEV_tta
    else:
        if test_dataloader_config is not None:
            test_dataset = get_model_class(dataset_config['dataset_type'])(
                test_pt_dataset,
                grid_size=grid_size,
                fixed_volume_space=dataset_config['fixed_volume_space'],
                max_volume_space=dataset_config['max_volume_space'],
                min_volume_space=dataset_config['min_volume_space'],
                ignore_label=dataset_config["ignore_label"],
            )
            collate_fn_BEV_tmp = collate_fn_BEV
    if ssl_dataloader_config is not None:
        # SSL (pseudo-label) dataset: no augmentation.
        ssl_dataset = get_model_class(dataset_config['dataset_type'])(
            ssl_pt_dataset,
            grid_size=grid_size,
            fixed_volume_space=dataset_config['fixed_volume_space'],
            max_volume_space=dataset_config['max_volume_space'],
            min_volume_space=dataset_config['min_volume_space'],
            ignore_label=dataset_config["ignore_label"],
        )
    # Train loader always uses the plain BEV collate; val/test use the
    # TTA-aware variant when use_tta is set.
    train_dataset_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                       batch_size=train_dataloader_config["batch_size"],
                                                       collate_fn=collate_fn_BEV,
                                                       shuffle=train_dataloader_config["shuffle"],
                                                       num_workers=train_dataloader_config["num_workers"])
    val_dataset_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                     batch_size=val_dataloader_config["batch_size"],
                                                     collate_fn=collate_fn_BEV_tmp,
                                                     shuffle=val_dataloader_config["shuffle"],
                                                     num_workers=val_dataloader_config["num_workers"])
    test_dataset_loader = None
    if test_dataloader_config is not None:
        test_dataset_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                          batch_size=test_dataloader_config["batch_size"],
                                                          collate_fn=collate_fn_BEV_tmp,
                                                          shuffle=test_dataloader_config["shuffle"],
                                                          num_workers=test_dataloader_config["num_workers"])
    ssl_dataset_loader = None
    if ssl_dataloader_config is not None:
        ssl_dataset_loader = torch.utils.data.DataLoader(dataset=ssl_dataset,
                                                         batch_size=ssl_dataloader_config["batch_size"],
                                                         collate_fn=collate_fn_BEV,
                                                         shuffle=ssl_dataloader_config["shuffle"],
                                                         num_workers=ssl_dataloader_config["num_workers"])
    return train_dataset_loader, val_dataset_loader, test_dataset_loader, ssl_dataset_loader
| 8,112 | 48.469512 | 111 | py |
T-Concord3D | T-Concord3D-master/builder/__init__.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
| 82 | 19.75 | 29 | py |
T-Concord3D | T-Concord3D-master/utils/trainer_function.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# at 8/10/22
# --------------------------|
import argparse
import os
import sys
import time
import warnings
import numpy as np
import torch
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
from dataloader.pc_dataset import get_label_name, update_config
from utils.load_save_util import load_checkpoint
from utils.metric_util import per_class_iu, fast_hist_crop
import copy
def yield_target_dataset_loader(n_epochs, target_train_dataset_loader):
    """Generator: replay the target-domain dataloader for `n_epochs` passes.

    Each yielded item is the (vox_label, grid, pt_fea, ref_st_idx,
    ref_end_idx, lcw) subset of the loader's 8-tuple batches.
    """
    for _ in range(n_epochs):
        for batch in target_train_dataset_loader:
            _, vox_label, grid, _, pt_fea, ref_st_idx, ref_end_idx, lcw = batch
            yield vox_label, grid, pt_fea, ref_st_idx, ref_end_idx, lcw
class Trainer(object):
    """Training/validation driver for the cylindrical segmentation model.

    Owns the model, optimizer and loss functions; `fit` runs the epoch loop,
    `validate` computes per-class confusion histograms, and the best-mIoU
    checkpoint is written to `ckpt_dir`.
    """

    def __init__(self,
                 model,
                 optimizer,
                 ckpt_dir,
                 unique_label,
                 unique_label_str,
                 lovasz_softmax,
                 loss_func,
                 ignore_label,
                 train_mode=None,
                 ssl=None,
                 eval_frequency=1,
                 pytorch_device=0,
                 warmup_epoch=1,
                 ema_frequency=5):
        # Core training objects.
        self.model = model
        self.optimizer = optimizer
        self.model_save_path = ckpt_dir
        # Evaluation label set (indices and printable names).
        self.unique_label = unique_label
        self.unique_label_str = unique_label_str
        self.eval_frequency = eval_frequency
        # Loss components: lovasz_softmax + a point-wise loss (CE/focal).
        self.lovasz_softmax = lovasz_softmax
        self.loss_func = loss_func
        self.ignore_label = ignore_label
        # NOTE(review): train_mode/warmup_epoch/ema_frequency are stored but
        # not used in the visible code — possibly for subclasses; confirm.
        self.train_mode = train_mode
        self.ssl = ssl
        self.pytorch_device = pytorch_device
        self.warmup_epoch = warmup_epoch
        self.ema_frequency = ema_frequency
        self.val = False
        # NOTE(review): fit() uses a *local* best_val_miou and never reads
        # this attribute — the two can diverge.
        self.best_val_miou = 0
        # Progress bar / loss print interval in iterations.
        self.progress_value = 100

    def criterion(self, outputs, point_label_tensor, lcw=None):
        """Combined loss: lovasz(softmax(outputs)) + point-wise loss.

        In SSL mode both terms are weighted by the per-point confidence `lcw`.
        """
        if self.ssl:
            lcw_tensor = torch.FloatTensor(lcw).to(self.pytorch_device)
            loss = self.lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor,
                                       ignore=self.ignore_label, lcw=lcw_tensor) \
                   + self.loss_func(outputs, point_label_tensor, lcw=lcw_tensor)
        else:
            loss = self.lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor,
                                       ignore=self.ignore_label) \
                   + self.loss_func(outputs, point_label_tensor)
        return loss

    def validate(self, my_model, val_dataset_loader, val_batch_size, test_loader=None, ssl=None):
        """Run one validation pass; return (confusion-hist list, loss list).

        The loss computation is commented out, so the second return value is
        currently always an empty list.
        """
        hist_list = []
        val_loss_list = []
        my_model.eval()
        with torch.no_grad():
            for i_iter_val, (
                    _, val_vox_label, val_grid, val_pt_labs, val_pt_fea, ref_st_idx, ref_end_idx, lcw) in enumerate(
                val_dataset_loader):
                # Move per-point features and voxel indices to the device.
                val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(self.pytorch_device) for i in
                                  val_pt_fea]
                val_grid_ten = [torch.from_numpy(i).to(self.pytorch_device) for i in val_grid]
                val_label_tensor = val_vox_label.type(torch.LongTensor).to(self.pytorch_device)
                predict_labels = my_model(val_pt_fea_ten, val_grid_ten, val_batch_size)
                # aux_loss = loss_fun(aux_outputs, point_label_tensor)
                inp = val_label_tensor.size(0)
                # TODO: check if this is correctly implemented
                # hack for batch_size mismatch with the number of training example
                predict_labels = predict_labels[:inp, :, :, :, :]
                # loss = self.criterion(predict_labels, val_label_tensor, lcw)
                # Voxel-grid argmax, then gather per-point predictions via the
                # point-to-voxel indices to build per-sample confusion hists.
                predict_labels = torch.argmax(predict_labels, dim=1)
                predict_labels = predict_labels.cpu().detach().numpy()
                for count, i_val_grid in enumerate(val_grid):
                    hist_list.append(fast_hist_crop(predict_labels[
                                                        count, val_grid[count][:, 0], val_grid[count][:, 1],
                                                        val_grid[count][:, 2]], val_pt_labs[count],
                                                    self.unique_label))
                # val_loss_list.append(loss.detach().cpu().numpy())
        return hist_list, val_loss_list

    def fit(self, n_epochs, source_train_dataset_loader, train_batch_size, val_dataset_loader,
            val_batch_size, test_loader=None, ckpt_save_interval=1, lr_scheduler_each_iter=False):
        """Train for `n_epochs`, validating after every epoch.

        Saves the model state dict to `self.model_save_path` whenever the
        validation mIoU improves. `ckpt_save_interval` and
        `lr_scheduler_each_iter` are currently unused.
        """
        global_iter = 1
        best_val_miou = 0
        for epoch in range(n_epochs):
            pbar = tqdm(total=len(source_train_dataset_loader))
            # train the model
            loss_list = []
            self.model.train()
            # training with multi-frames and ssl:
            for i_iter_train, (
                    _, train_vox_label, train_grid, _, train_pt_fea, ref_st_idx, ref_end_idx, lcw) in enumerate(
                source_train_dataset_loader):
                # call the validation and inference with
                train_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(self.pytorch_device) for i in
                                    train_pt_fea]
                # train_grid_ten = [torch.from_numpy(i[:,:2]).to(self.pytorch_device) for i in train_grid]
                train_vox_ten = [torch.from_numpy(i).to(self.pytorch_device) for i in train_grid]
                point_label_tensor = train_vox_label.type(torch.LongTensor).to(self.pytorch_device)
                # forward + backward + optimize
                outputs = self.model(train_pt_fea_ten, train_vox_ten, train_batch_size)
                inp = point_label_tensor.size(0)
                # print(f"outputs.size() : {outputs.size()}")
                # TODO: check if this is correctly implemented
                # hack for batch_size mismatch with the number of training example
                outputs = outputs[:inp, :, :, :, :]
                ################################
                loss = self.criterion(outputs, point_label_tensor, lcw)
                # TODO: check --> to mitigate only one element tensors can be converted to Python scalars
                loss = loss.mean()
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
                # Uncomment to use the learning rate scheduler
                # scheduler.step()
                loss_list.append(loss.item())
                # Progress/metric printout every `progress_value` iterations.
                if global_iter % self.progress_value == 0:
                    pbar.update(self.progress_value)
                    if len(loss_list) > 0:
                        print('epoch %d iter %5d, loss: %.3f\n' % (epoch, i_iter_train, np.mean(loss_list)))
                    else:
                        print('loss error')
                global_iter += 1
            # ----------------------------------------------------------------------#
            # Evaluation/validation
            with torch.no_grad():
                hist_list, val_loss_list = self.validate(self.model, val_dataset_loader, val_batch_size,
                                                         test_loader, self.ssl)
            # ----------------------------------------------------------------------#
            # Print validation mIoU and Loss
            print(f"--------------- epoch: {epoch} ----------------")
            iou = per_class_iu(sum(hist_list))
            print('Validation per class iou: ')
            for class_name, class_iou in zip(self.unique_label_str, iou):
                print('%s : %.2f%%' % (class_name, class_iou * 100))
            val_miou = np.nanmean(iou) * 100
            # del val_vox_label, val_grid, val_pt_fea
            # save model if performance is improved
            if best_val_miou < val_miou:
                best_val_miou = val_miou
                torch.save(self.model.state_dict(), self.model_save_path)
            print('Current val miou is %.3f while the best val miou is %.3f' %
                  (val_miou, best_val_miou))
            # print('Current val loss is %.3f' % (np.mean(val_loss_list)))
| 8,482 | 42.953368 | 116 | py |
T-Concord3D | T-Concord3D-master/utils/load_save_util.py | # -*- coding:utf-8 -*-
import torch
def load_checkpoint(model_load_path, model, map_location=None):
    """Load a checkpoint into `model`, keeping only parameters whose name
    (after stripping a DataParallel 'module.' prefix) and shape match.

    Prints how many tensors matched/did not match; returns the model.
    """
    target_state = model.state_dict()
    if map_location is not None:
        checkpoint = torch.load(model_load_path, map_location=f'cuda:{map_location}')
    else:
        checkpoint = torch.load(model_load_path)
    transfer = {}
    kept = 0
    dropped = 0
    for name in checkpoint.keys():
        tensor = checkpoint[name]
        # strip DataParallel prefix so single-GPU models can load DP checkpoints
        key = name[7:] if name[:7] == 'module.' else name
        if key in target_state and target_state[key].shape == tensor.shape:
            #print("loading ", key)
            kept += 1
            transfer[key] = tensor
        else:
            dropped += 1
    print("matched parameter sets: {}, and no matched: {}".format(kept, dropped))
    target_state.update(transfer)
    model.load_state_dict(target_state)
    return model
def load_checkpoint_1b1(model_load_path, model):
    """Load a checkpoint into `model` by *position* (one-by-one): the i-th
    checkpoint tensor goes into the i-th model parameter, shape permitting.

    Mismatched pairs are printed and skipped; returns the model.
    """
    target_state = model.state_dict()
    checkpoint = torch.load(model_load_path)
    transfer = {}
    kept = 0
    dropped = 0
    src_keys = [*checkpoint]
    dst_keys = [*target_state]
    for idx in range(len(src_keys)):
        src_key = src_keys[idx]
        dst_key = dst_keys[idx]
        tensor = checkpoint[src_key]
        if target_state[dst_key].shape == tensor.shape:
            # print("loading ", src_key)
            kept += 1
            transfer[dst_key] = tensor
        else:
            print(src_key)
            print(dst_key)
            dropped += 1
    print("matched parameter sets: {}, and no matched: {}".format(kept, dropped))
    target_state.update(transfer)
    model.load_state_dict(target_state)
    return model
| 1,772 | 26.703125 | 92 | py |
T-Concord3D | T-Concord3D-master/utils/metric_util.py | # -*- coding:utf-8 -*-
import numpy as np
def fast_hist(pred, label, n):
    """n x n confusion matrix (rows = label, cols = pred).

    Positions whose label falls outside [0, n) are dropped; out-of-range
    predictions land past n*n and are cropped away.
    """
    valid = (label >= 0) & (label < n)
    flat = n * label[valid].astype(int) + pred[valid]
    counts = np.bincount(flat, minlength=n ** 2)
    return counts[:n ** 2].reshape(n, n)
def per_class_iu(hist):
    """Per-class IoU from a confusion matrix: diag / (row_sum + col_sum - diag)."""
    true_pos = np.diag(hist)
    return true_pos / (hist.sum(1) + hist.sum(0) - true_pos)
def fast_hist_crop(output, target, unique_label):
    """Confusion matrix restricted to the rows/cols in `unique_label`.

    Labels are offset by +1 in the full histogram (so label -1 maps to row 0,
    which is then cropped away).
    """
    full = fast_hist(output.flatten(), target.flatten(), np.max(unique_label) + 2)
    keep = unique_label + 1
    return full[keep, :][:, keep]
def fast_ups_crop(uncrt, target, unique_label, num_classes=20):
    """Aggregate per-class uncertainty sums and per-class point counts.

    Args:
        uncrt: per-point uncertainty values.
        target: per-point class labels (ints in [0, num_classes)).
        unique_label: kept for interface compatibility; not used here.
        num_classes: number of classes (generalized from the hard-coded 20).

    Returns:
        (hist, class_count): `hist` is a list of summed uncertainty per class,
        `class_count` an array of point counts per class.
    """
    hist = [np.sum(uncrt[target == i]) for i in range(num_classes)]
    present, counts = np.unique(target, return_counts=True)
    class_count = np.zeros(num_classes)
    class_count[present] = counts
    return hist, class_count
| 868 | 27.966667 | 82 | py |
T-Concord3D | T-Concord3D-master/utils/__init__.py | # -*- coding:utf-8 -*-
# author: Xinge
# @file: __init__.py.py
| 64 | 15.25 | 24 | py |
T-Concord3D | T-Concord3D-master/utils/ups.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
def enable_dropout(model):
    """Switch only the Dropout submodules of `model` to train mode.

    Used to keep dropout active at inference time (Monte-Carlo dropout)
    while the rest of the network stays in eval mode.
    """
    for module in model.modules():
        if type(module).__name__.startswith('Dropout'):
            module.train()
| 218 | 20.9 | 54 | py |
T-Concord3D | T-Concord3D-master/utils/lovasz_losses.py | # -*- coding:utf-8 -*-
# author: Xinge
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
"""
from __future__ import print_function, division
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
    """IoU of the foreground class (binary masks: 1 = fg, 0 = bg), in percent.

    Empty unions score `EMPTY`; `ignore` labels are excluded from the union.
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    scores = []
    for pred, label in zip(preds, labels):
        inter = ((label == 1) & (pred == 1)).sum()
        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
        if not union:
            scores.append(EMPTY)
        else:
            scores.append(float(inter) / float(union))
    # mean across images if per_image
    return 100 * mean(scores)
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """Array of IoU values for each non-ignored class, in percent."""
    if not per_image:
        preds, labels = (preds,), (labels,)
    per_image_ious = []
    for pred, label in zip(preds, labels):
        class_ious = []
        for c in range(C):
            # The ignored label is sometimes among predicted classes
            # (ENet - CityScapes), so skip it entirely.
            if c == ignore:
                continue
            inter = ((label == c) & (pred == c)).sum()
            union = ((label == c) | ((pred == c) & (label != ignore))).sum()
            if not union:
                class_ious.append(EMPTY)
            else:
                class_ious.append(float(inter) / float(union))
        per_image_ious.append(class_ious)
    # mean across images if per_image
    averaged = [mean(vals) for vals in zip(*per_image_ious)]
    return 100 * np.array(averaged)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
    """Binary Lovasz hinge loss.

    logits: [B, H, W] raw scores; labels: [B, H, W] binary masks (0/1);
    per_image: average the loss per image rather than over the whole batch;
    ignore: void class id.
    """
    if not per_image:
        return lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
    per_image_losses = (
        lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
        for log, lab in zip(logits, labels)
    )
    return mean(per_image_losses)
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened predictions.

    logits: [P] raw scores; labels: [P] binary ground truth (0/1).
    """
    if len(labels) == 0:
        # only void pixels: return a zero loss that keeps the graph alive
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = 1. - logits * Variable(signs)
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    gt_sorted = labels[perm.data]
    grad = lovasz_grad(gt_sorted)
    return torch.dot(F.relu(errors_sorted), Variable(grad))
def flatten_binary_scores(scores, labels, ignore=None):
    """Flatten batch scores/labels to 1-D, dropping positions labeled `ignore`."""
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    keep = labels != ignore
    return scores[keep], labels[keep]
class StableBCELoss(torch.nn.modules.Module):
    """Numerically stable binary cross-entropy on raw logits.

    Uses max(x, 0) - x*t + log(1 + exp(-|x|)), which avoids overflow for
    large-magnitude logits.
    """

    def __init__(self):
        super(StableBCELoss, self).__init__()

    def forward(self, input, target):
        neg_abs = -input.abs()
        stable_bce = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
        return stable_bce.mean()
def binary_xloss(logits, labels, ignore=None):
    """Stable binary cross-entropy on logits, skipping `ignore`-labeled pixels.

    logits: [B, H, W] raw scores; labels: [B, H, W] binary masks (0/1).
    """
    flat_logits, flat_labels = flatten_binary_scores(logits, labels, ignore)
    return StableBCELoss()(flat_logits, Variable(flat_labels.float()))
# --------------------------- MULTICLASS LOSSES ---------------------------
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
    """Multi-class Lovasz-Softmax loss.

    probas: [B, C, H, W] class probabilities in [0, 1] ([B, H, W] is treated
    as sigmoid output); labels: [B, H, W] ground truth in [0, C-1];
    classes: 'all', 'present', or an explicit class list; ignore: void label.
    """
    if not per_image:
        return lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
    per_image_losses = (
        lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
        for prob, lab in zip(probas, labels)
    )
    return mean(per_image_losses)
def lovasz_softmax_flat(probas, labels, classes='present'):
    """Multi-class Lovasz-Softmax loss on flattened predictions.

    probas: [P, C] class probabilities in [0, 1]; labels: [P] ground truth
    in [0, C-1]; classes: 'all', 'present', or an explicit class list.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground mask for class c
        # fixed: was `classes is 'present'` — identity comparison on a str
        # literal relies on CPython interning and emits a SyntaxWarning.
        if (classes == 'present' and fg.sum() == 0):
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)
def flatten_probas(probas, labels, ignore=None):
    """Flatten batched class probabilities to (P, C) and labels to (P,),
    dropping positions labeled `ignore`."""
    if probas.dim() == 3:
        # assumes the output of a sigmoid layer: add a singleton channel dim
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    elif probas.dim() == 5:
        # 3D segmentation: fold the depth axis into the spatial dims
        B, C, L, H, W = probas.size()
        probas = probas.contiguous().view(B, C, L, H * W)
    B, C, H, W = probas.size()
    # B * H * W rows, C columns
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = labels != ignore
    vprobas = probas[valid.nonzero().squeeze()]
    vlabels = labels[valid]
    return vprobas, vlabels
#--------------------------------------------------------------------------------------------------------------------#
#---------------------- segmentation confidence probability waited loss function---------------------#
def cross_entropy_lcw(probas, labels, ignore_label=0, weights=None, lcw=None):
    """Per-point cross-entropy weighted by label confidence weights (lcw).

    Used for pseudo-labeled (semi-supervised) data: each point's CE loss is
    multiplied by its confidence weight before averaging.

    Args:
        probas: [N, C] raw logits.
        labels: [N] class indices (GT or pseudo labels).
        ignore_label: class index excluded from the loss.
        weights: unused here; kept for interface compatibility.
        lcw: per-point confidence weights; required.

    Raises:
        ValueError: if `lcw` is None.
        (Fixed: the original `raise "error..."` raised a str, which is itself
        a TypeError under Python 3.)
    """
    if lcw is None:
        raise ValueError("per-label weight (lcw) is None")
    raw_loss_fn = torch.nn.CrossEntropyLoss(ignore_index=ignore_label, reduction='none')
    raw_loss = raw_loss_fn(probas, labels)
    # NOTE(review): the raw lcw (apparently on a 0-100 scale — a normalized
    # lcw/100 was computed but never used in the original) weights the loss
    # directly; preserved as-is. Confirm the intended scale.
    weighted_loss = (raw_loss * lcw).mean()
    return weighted_loss
def lovasz_softmax_lcw(probas, labels, classes='present', per_image=False, ignore=None, lcw=None):
    """Confidence-weighted multi-class Lovasz-Softmax loss.

    Same contract as lovasz_softmax, but per-point errors are scaled by the
    label confidence weights `lcw` inside the flat loss.
    """
    if not per_image:
        return lovasz_softmax_flat_lcw(*flatten_probas_lcw(probas, labels, ignore, lcw), classes=classes)
    per_image_losses = (
        lovasz_softmax_flat_lcw(*flatten_probas_lcw(prob.unsqueeze(0), lab.unsqueeze(0), ignore, lcw), classes=classes)
        for prob, lab in zip(probas, labels)
    )
    return mean(per_image_losses)
def lovasz_softmax_flat_lcw(probas, labels, lcw=None, classes='present'):
    """Confidence-weighted multi-class Lovasz-Softmax loss on flat predictions.

    probas: [P, C] class probabilities; labels: [P] ground truth in [0, C-1];
    lcw: [P] per-point confidence weights (0-100 scale, normalized to 0-1);
    classes: 'all', 'present', or an explicit class list.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    # loop-invariant: normalize confidence weights once (was recomputed per class)
    norm_lcw = (lcw / 100.0)
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground mask for class c
        # fixed: was `classes is 'present'` — identity comparison on a str
        # literal relies on CPython interning and emits a SyntaxWarning.
        if (classes == 'present' and fg.sum() == 0):
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        # scale each point's error by its confidence
        errors *= norm_lcw
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)
def flatten_probas_lcw(probas, labels, ignore=None, lcw=None):
    """Flatten batched probabilities, labels and confidence weights together,
    dropping positions labeled `ignore`."""
    if probas.dim() == 3:
        # assumes the output of a sigmoid layer: add a singleton channel dim
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    elif probas.dim() == 5:
        # 3D segmentation: fold the depth axis into the spatial dims
        B, C, L, H, W = probas.size()
        probas = probas.contiguous().view(B, C, L, H * W)
    B, C, H, W = probas.size()
    # B * H * W rows, C columns
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    lcw = lcw.view(-1)
    if ignore is None:
        return probas, labels, lcw
    valid = labels != ignore
    vprobas = probas[valid.nonzero().squeeze()]
    vlabels = labels[valid]
    vlcw = lcw[valid]
    return vprobas, vlabels, vlcw
#---------------------End of prediction confidence weighted lovasz_softmax loss ----------------------------#
#--------------------------------------------------------------------------------------------------------------------#
def xloss(logits, labels, ignore=None):
    """Plain cross-entropy loss; positions labeled `ignore` are excluded.

    Fixed: the `ignore` argument was previously accepted but silently
    discarded — the loss always used ignore_index=255. 255 remains the
    default when `ignore` is not given.
    """
    ignore_index = 255 if ignore is None else ignore
    return F.cross_entropy(logits, Variable(labels), ignore_index=ignore_index)
def jaccard_loss(probas, labels, ignore=None, smooth=100, bk_class=None):
    """Smoothed (soft) Jaccard loss over all classes at once.

    probas: [B, C, H, W] class probabilities in [0, 1]; labels: [B, H, W]
    ground truth in [0, C-1]; ignore: void label; bk_class: optional class
    whose one-hot rows are zeroed out (excluded from the overlap).

    Fixed: the smoothed ratio was mis-parenthesized as
    `intersection + smooth / (cardinality - intersection + smooth)` — the
    original docstring itself noted "Something wrong with this loss". It now
    computes `(intersection + smooth) / (cardinality - intersection + smooth)`.
    """
    vprobas, vlabels = flatten_probas(probas, labels, ignore)
    true_1_hot = torch.eye(vprobas.shape[1])[vlabels]
    if bk_class:
        # zero the one-hot rows of the background class so it contributes
        # nothing to intersection/cardinality
        one_hot_assignment = torch.ones_like(vlabels)
        one_hot_assignment[vlabels == bk_class] = 0
        one_hot_assignment = one_hot_assignment.float().unsqueeze(1)
        true_1_hot = true_1_hot * one_hot_assignment
    true_1_hot = true_1_hot.to(vprobas.device)
    intersection = torch.sum(vprobas * true_1_hot)
    cardinality = torch.sum(vprobas + true_1_hot)
    loss = ((intersection + smooth) / (cardinality - intersection + smooth)).mean()
    return (1 - loss) * smooth
def hinge_jaccard_loss(probas, labels,ignore=None, classes = 'present', hinge = 0.1, smooth =100):
    """
    Multi-class Hinge Jaccard loss: per-class soft TP/FP/FN counts derived
    from hinged margins between the class score and the best other-class score.
      probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
              Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
      labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
      hinge: margin for the clamped score differences.
      smooth: additive smoothing on the TP count.
      ignore: void class labels
    """
    vprobas, vlabels = flatten_probas(probas, labels, ignore)
    C = vprobas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        # only score classes that actually occur in the labels
        if c in vlabels:
            c_sample_ind = vlabels == c
            cprobas = vprobas[c_sample_ind,:]
            non_c_ind =np.array([a for a in class_to_sum if a != c])
            class_pred = cprobas[:,c]
            max_non_class_pred = torch.max(cprobas[:,non_c_ind],dim = 1)[0]
            # soft TP on true-c points: hinged margin in [1-hinge*.., 1+hinge]
            TP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max = hinge)+1.) + smooth
            FN = torch.sum(torch.clamp(max_non_class_pred - class_pred, min = -hinge)+hinge)
            if (~c_sample_ind).sum() == 0:
                # every point is class c: nothing can be a false positive
                FP = 0
            else:
                # soft FP: same margin, evaluated on the non-c points
                nonc_probas = vprobas[~c_sample_ind,:]
                class_pred = nonc_probas[:,c]
                max_non_class_pred = torch.max(nonc_probas[:,non_c_ind],dim = 1)[0]
                FP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max = hinge)+1.)
            losses.append(1 - TP/(TP+FP+FN))
    if len(losses) == 0: return 0
    return mean(losses)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
    """Return True iff x is NaN (NaN is the only value for which x != x)."""
    return x != x
def mean(l, ignore_nan=False, empty=0):
    """nanmean that also works on generators.

    Returns `empty` for an empty input (or raises ValueError if
    empty == 'raise'). With ignore_nan=True, NaN items are filtered out.
    """
    values = iter(l)
    if ignore_nan:
        values = ifilterfalse(isnan, values)
    try:
        total = next(values)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    count = 1
    for count, v in enumerate(values, 2):
        total += v
    if count == 1:
        return total
    return total / count
| 15,675 | 36.864734 | 131 | py |
T-Concord3D | T-Concord3D-master/utils/loss_func.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
    """Multi-class focal loss: (1 - p)^gamma scaled NLL on log-softmax scores.

    In SSL mode (`ssl=True` and an `lcw` passed to forward), the per-point
    losses are additionally weighted by the label confidence before averaging.
    """

    def __init__(self, weight=None, ignore_index=None,
                 gamma=2., reduction='none', ssl=False):
        nn.Module.__init__(self)
        self.ignore_index = ignore_index
        self.weight = weight
        self.gamma = gamma
        self.reduction = reduction
        self.ssl = ssl

    def forward(self, input_tensor, target_tensor, lcw=None):
        log_prob = F.log_softmax(input_tensor, dim=1)
        # focal modulation: down-weight well-classified points
        focal_term = (1 - torch.exp(log_prob)) ** self.gamma
        per_point = F.nll_loss(
            focal_term * log_prob,
            target_tensor,
            weight=self.weight,
            reduction=self.reduction,
            ignore_index=self.ignore_index,
        )
        if self.ssl and lcw is not None:
            # confidence-weighted mean (lcw appears to be on a 0-100 scale)
            return (per_point * lcw).mean()
        return per_point.mean()
class WeightedFocalLoss(nn.Module):
    """Class-weighted focal loss: weight[target] * (1 - pt)^gamma * CE.

    Fixed: forward referenced `self.weights`, which was never assigned
    (__init__ stores the tensor as `self.weight`), so every call raised
    AttributeError.
    """

    def __init__(self, weight=None, ignore_index=None,
                 gamma=2., reduction='none', ssl=False):
        super().__init__()
        # NOTE(review): ignore_index/reduction/ssl are stored but not used by
        # forward in the original either — kept for interface compatibility.
        self.ignore_index = ignore_index
        self.weight = weight  # per-class weight tensor, indexed by target
        self.gamma = gamma
        self.reduction = reduction
        self.ssl = ssl

    def forward(self, inputs, targets):
        inputs = inputs.squeeze()
        targets = targets.squeeze()
        ce_loss = F.cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-ce_loss)  # model's probability for the true class
        focal = self.weight[targets] * (1 - pt) ** self.gamma * ce_loss
        return focal.mean()
| 1,766 | 28.949153 | 69 | py |
T-Concord3D | T-Concord3D-master/utils/log_util.py | # -*- coding:utf-8 -*-
def save_to_log(logdir, logfile, message):
    """Append `message` (with a trailing newline) to `<logdir>/<logfile>`.

    Uses a context manager so the handle is closed even if the write raises
    (the original leaked the file object on error).
    """
    with open(logdir + '/' + logfile, "a") as f:
        f.write(message + '\n')
    return
T-Concord3D | T-Concord3D-master/model/cylinder_3d.py | # -*- coding:utf-8 -*-
import torch
from torch import nn
REGISTERED_MODELS_CLASSES = {}
def register_model(cls, name=None):
    """Class decorator: add `cls` to the global model registry.

    Registered under `name` when given, otherwise under the class's own name;
    duplicate registrations are rejected.
    """
    global REGISTERED_MODELS_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_MODELS_CLASSES, f"exist class: {REGISTERED_MODELS_CLASSES}"
    REGISTERED_MODELS_CLASSES[key] = cls
    return cls
def get_model_class(name):
global REGISTERED_MODELS_CLASSES
assert name in REGISTERED_MODELS_CLASSES, f"available class: {REGISTERED_MODELS_CLASSES}"
return REGISTERED_MODELS_CLASSES[name]
@register_model
class cylinder_asym(nn.Module):
    """Two-stage cylindrical segmentation net: a point-feature generator
    followed by an asymmetric sparse-conv segmentation backbone.

    With ``use_tta`` the batch holds ``voting_num`` augmented copies of one
    scan whose per-point logits (gathered at ``val_grid``) are summed.
    """
    def __init__(self,
                 cylin_model,
                 segmentator_spconv,
                 sparse_shape,
                 ):
        super().__init__()
        self.name = "cylinder_asym"
        self.cylinder_3d_generator = cylin_model
        self.cylinder_3d_spconv_seg = segmentator_spconv
        self.sparse_shape = sparse_shape

    def forward(self, train_pt_fea_ten, train_vox_ten, batch_size, val_grid=None, voting_num=4, use_tta=False):
        coords, features_3d = self.cylinder_3d_generator(train_pt_fea_ten, train_vox_ten)
        # Under TTA each logical sample occupies voting_num slots in the batch.
        effective_bs = batch_size * voting_num if use_tta else batch_size
        spatial_features = self.cylinder_3d_spconv_seg(features_3d, coords, effective_bs)
        if not use_tta:
            return spatial_features
        # Gather each augmented view's logits at its own voxel indices and
        # sum them into a single per-point prediction.
        per_view = torch.split(spatial_features, 1, dim=0)
        grid = val_grid[0]
        fused_predict = per_view[0][0, :, grid[:, 0], grid[:, 1], grid[:, 2]]
        for view in range(1, voting_num):
            grid = val_grid[view]
            fused_predict = fused_predict + per_view[view][0, :, grid[:, 0], grid[:, 1], grid[:, 2]]
        return fused_predict
T-Concord3D | T-Concord3D-master/model/segment_3d.py | # -*- coding:utf-8 -*-
import numpy as np
#import spconv
import spconv.pytorch as spconv
import torch
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
    """3x3x3 submanifold sparse convolution, padding 1, no bias."""
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=3,
                              stride=stride, padding=1, bias=False,
                              indice_key=indice_key)
    return layer
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
    """(1,3,3) asymmetric submanifold sparse conv, padding (0,1,1), no bias."""
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 3),
                              stride=stride, padding=(0, 1, 1), bias=False,
                              indice_key=indice_key)
    return layer
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
    """(1,1,3) submanifold sparse conv along the last axis, no bias."""
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 1, 3),
                              stride=stride, padding=(0, 0, 1), bias=False,
                              indice_key=indice_key)
    return layer
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
    """(1,3,1) submanifold sparse conv along the middle axis, no bias."""
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 1),
                              stride=stride, padding=(0, 1, 0), bias=False,
                              indice_key=indice_key)
    return layer
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
    """(3,1,1) submanifold sparse conv along the first axis, no bias."""
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 1),
                              stride=stride, padding=(1, 0, 0), bias=False,
                              indice_key=indice_key)
    return layer
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
    """(3,1,3) asymmetric submanifold sparse conv, padding (1,0,1), no bias."""
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 3),
                              stride=stride, padding=(1, 0, 1), bias=False,
                              indice_key=indice_key)
    return layer
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x1 submanifold sparse conv, no bias.

    NOTE(review): padding=1 with kernel_size=1 looks unintentional — harmless
    for submanifold convs, but confirm.
    """
    layer = spconv.SubMConv3d(in_planes, out_planes, kernel_size=1,
                              stride=stride, padding=1, bias=False,
                              indice_key=indice_key)
    return layer
class ResContextBlock(nn.Module):
    """Residual context block on sparse voxel tensors.

    Two parallel branches of two stacked asymmetric submanifold convolutions
    (each followed by LeakyReLU + BatchNorm1d over the feature dim); the
    branch outputs are summed element-wise.  All convs share the
    ``indice_key + "bef"`` rulebook, so voxel indices are preserved.
    ``kernel_size`` and ``stride`` arguments are accepted but unused.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ResContextBlock, self).__init__()
        # Branch A: two (1,3,3) convs (conv1x3).
        self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.LeakyReLU()
        #elf.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.LeakyReLU()
        # Branch B: two (3,1,3) convs (conv3x1).
        self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        #self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key + "bef")
        self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        self.weight_initialization()
    def weight_initialization(self):
        # Standard BN init: unit scale, zero shift.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # Branch A: conv -> act -> bn, twice.
        shortcut = self.conv1(x)
        shortcut = shortcut.replace_feature(self.act1(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0(shortcut.features))
        shortcut = self.conv1_2(shortcut)
        shortcut = shortcut.replace_feature(self.act1_2(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0_2(shortcut.features))
        # Branch B: same structure on the raw input.
        resA = self.conv2(x)
        resA = resA.replace_feature(self.act2(resA.features))
        resA = resA.replace_feature(self.bn1(resA.features))
        resA = self.conv3(resA)
        resA = resA.replace_feature(self.act3(resA.features))
        resA = resA.replace_feature(self.bn2(resA.features))
        # Element-wise sum of the two branches (indices are shared).
        resA = resA.replace_feature(resA.features + shortcut.features)
        return resA
class ResBlock(nn.Module):
    """Encoder residual block with optional strided sparse downsampling.

    Same two-branch asymmetric-conv structure as ``ResContextBlock``; when
    ``pooling`` is True a strided ``SparseConv3d`` downsamples the summed
    output (stride 2 in all axes if ``height_pooling``, else only in the
    first two axes) and ``forward`` returns ``(pooled, pre_pooled)``.
    ``dropout_rate``, ``kernel_size``, ``stride`` and ``drop_out`` are
    accepted but unused.
    """
    def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
                 pooling=True, drop_out=True, height_pooling=False, indice_key=None):
        super(ResBlock, self).__init__()
        self.pooling = pooling
        #self.drop_out = drop_out
        # Branch A: two (3,1,3) convs.
        self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.act1 = nn.LeakyReLU()
        self.bn0 = nn.BatchNorm1d(out_filters)
        # self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key + "bef")
        self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.act1_2 = nn.LeakyReLU()
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        # Branch B: two (1,3,3) convs.
        self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key + "bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        # self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key + "bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        if pooling:
            # Strided (non-submanifold) conv acts as the downsampling op;
            # indice_key lets the matching SparseInverseConv3d invert it.
            if height_pooling:
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
                                                padding=1, indice_key=indice_key, bias=False)
            else:
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2, 2, 1),
                                                padding=1, indice_key=indice_key, bias=False)
        self.weight_initialization()
    def weight_initialization(self):
        # Standard BN init: unit scale, zero shift.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # Branch A.
        shortcut = self.conv1(x)
        shortcut = shortcut.replace_feature(self.act1(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0(shortcut.features))
        shortcut = self.conv1_2(shortcut)
        shortcut = shortcut.replace_feature(self.act1_2(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0_2(shortcut.features))
        # Branch B.
        resA = self.conv2(x)
        resA = resA.replace_feature(self.act2(resA.features))
        resA = resA.replace_feature(self.bn1(resA.features))
        resA = self.conv3(resA)
        resA = resA.replace_feature(self.act3(resA.features))
        resA = resA.replace_feature(self.bn2(resA.features))
        # Sum branches; optionally downsample, keeping the skip tensor.
        resA = resA.replace_feature(resA.features + shortcut.features)
        if self.pooling:
            resB = self.pool(resA)
            return resB, resA
        else:
            return resA
class UpBlock(nn.Module):
    """Decoder block: transform, sparse-inverse upsample, add skip, refine.

    ``up_key`` must match the ``indice_key`` of the encoder's strided
    SparseConv3d so ``SparseInverseConv3d`` can restore its input geometry.
    ``kernel_size`` and ``dropout_rate`` are accepted but unused (the
    dropout lines are commented out below).
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None, dropout_rate=0.25):
        super(UpBlock, self).__init__()
        #self.drop_out = drop_out
        # Channel-reducing 3x3x3 transform before upsampling.
        self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key + "new_up")
        self.trans_act = nn.LeakyReLU()
        self.trans_bn = nn.BatchNorm1d(out_filters)
        # Three (1,3,3) refinement convs applied after the skip addition.
        self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act1 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        #self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.conv2 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act2 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        #self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act3 = nn.LeakyReLU()
        self.bn3 = nn.BatchNorm1d(out_filters)
        # TODO: commnet this drop out after experiment
        # self.dropout3 = nn.Dropout3d(p=dropout_rate)  # added by me
        # Inverts the encoder downsampling identified by up_key.
        self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
        self.weight_initialization()
    def weight_initialization(self):
        # Standard BN init: unit scale, zero shift.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x, skip):
        upA = self.trans_dilao(x)
        upA = upA.replace_feature(self.trans_act(upA.features))
        upA = upA.replace_feature(self.trans_bn(upA.features))
        ## upsample
        upA = self.up_subm(upA)
        # Additive skip connection (same indices as the encoder tensor).
        upA = upA.replace_feature(upA.features + skip.features)
        upE = self.conv1(upA)
        upE = upE.replace_feature(self.act1(upE.features))
        upE = upE.replace_feature(self.bn1(upE.features))
        upE = self.conv2(upE)
        upE = upE.replace_feature(self.act2(upE.features))
        upE = upE.replace_feature(self.bn2(upE.features))
        upE = self.conv3(upE)
        upE = upE.replace_feature(self.act3(upE.features))
        # TODO: comment this drop out after experiment
        # upE = upE.replace_feature(self.bn3(self.dropout3(upE.features)))  # added by me
        upE = upE.replace_feature(self.bn3(upE.features))  # original implementation
        return upE
class ReconBlock(nn.Module):
    """Dimension-decomposition ('reconstruction') attention block.

    Three parallel 1-D asymmetric sparse convs (along each spatial axis),
    each followed by BatchNorm1d + Sigmoid, produce gating features; the
    three gates are summed and multiplied element-wise onto the input
    features (so ``in_filters`` must equal ``out_filters``).
    ``kernel_size`` and ``stride`` are accepted but unused.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ReconBlock, self).__init__()
        self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.Sigmoid()
        self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.Sigmoid()
        self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0_3 = nn.BatchNorm1d(out_filters)
        self.act1_3 = nn.Sigmoid()
    def forward(self, x):
        shortcut = self.conv1(x)
        shortcut = shortcut.replace_feature(self.bn0(shortcut.features))
        shortcut = shortcut.replace_feature(self.act1(shortcut.features))
        shortcut2 = self.conv1_2(x)
        shortcut2 = shortcut2.replace_feature(self.bn0_2(shortcut2.features))
        shortcut2 = shortcut2.replace_feature(self.act1_2(shortcut2.features))
        shortcut3 = self.conv1_3(x)
        # Bug fix: the original called `shortcut.replace_feature(...)` here,
        # building the third gate on the wrong tensor object (harmless only
        # because all branches share the same submanifold indices).
        shortcut3 = shortcut3.replace_feature(self.bn0_3(shortcut3.features))
        shortcut3 = shortcut3.replace_feature(self.act1_3(shortcut3.features))
        # Sum the three gates, then gate the input features element-wise.
        shortcut = shortcut.replace_feature(shortcut.features + shortcut2.features + shortcut3.features)
        shortcut = shortcut.replace_feature(shortcut.features * x.features)
        return shortcut
class Asymm_3d_spconv(nn.Module):
    """Asymmetric sparse-conv 3D U-Net producing per-voxel class logits.

    Encoder: ResContextBlock + four ResBlocks (progressive downsampling);
    decoder: four UpBlocks with additive skips, a ReconBlock gating stage,
    then a final submanifold conv over the concatenation of the ReconBlock
    output and its input (hence 4 * init_size input channels).
    ``use_norm`` and ``strict`` constructor args are ignored.
    """
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 nclasses=20, n_height=32, strict=False, init_size=16):
        super(Asymm_3d_spconv, self).__init__()
        self.nclasses = nclasses
        self.nheight = n_height
        self.strict = False  # NOTE(review): the `strict` argument is discarded
        sparse_shape = np.array(output_shape)
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        # Encoder: context block, then channel-doubling residual stages.
        self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
        self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
        self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
        self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False,
                                  indice_key="down4")
        self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False,
                                  indice_key="down5")
        # Decoder: up_key ties each UpBlock to the matching encoder stage.
        self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
        self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
        self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
        self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
        self.ReconNet = ReconBlock(2 * init_size, 2 * init_size, indice_key="recon")
        # 4 * init_size: ReconBlock output concatenated with its input.
        self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1,
                                        bias=True)
    def forward(self, voxel_features, coors, batch_size):
        # coors: (N, 4) int indices (batch, x, y, z) for the sparse tensor.
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)
        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)
        up0e = self.ReconNet(up1e)
        # Concatenate gated and ungated features along the channel dim.
        up0e = up0e.replace_feature(torch.cat((up0e.features, up1e.features), 1))
        logits = self.logits(up0e)
        # Densify for the voxel-grid loss / argmax downstream.
        y = logits.dense()
        return y
T-Concord3D | T-Concord3D-master/model/__init__.py | # -*- coding:utf-8 -*-
| 23 | 11 | 22 | py |
T-Concord3D | T-Concord3D-master/model/cylinder_feature.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import numba as nb
import multiprocessing
import torch_scatter
class cylinder_fea(nn.Module):
    """Point-feature generator: PointNet-style MLP + per-voxel scatter-max.

    ``forward`` takes per-batch lists of point features and voxel indices,
    flattens them with a batch-id column, deduplicates voxel coordinates and
    max-pools the MLP outputs per voxel, optionally compressing the pooled
    features to ``fea_compre`` channels.
    NOTE(review): ``max_pt_per_encode`` and ``local_pool_op`` are stored but
    never used in ``forward``.
    """
    def __init__(self, grid_size, fea_dim=3,
                 out_pt_fea_dim=64, max_pt_per_encode=64, fea_compre=None):
        super(cylinder_fea, self).__init__()
        # Per-point MLP (applied before voxel pooling).
        self.PPmodel = nn.Sequential(
            nn.BatchNorm1d(fea_dim),
            nn.Linear(fea_dim, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, out_pt_fea_dim)
        )
        self.max_pt = max_pt_per_encode
        self.fea_compre = fea_compre
        self.grid_size = grid_size
        kernel_size = 3
        self.local_pool_op = torch.nn.MaxPool2d(kernel_size, stride=1,
                                                padding=(kernel_size - 1) // 2,
                                                dilation=1)
        self.pool_dim = out_pt_fea_dim
        # point feature compression
        if self.fea_compre is not None:
            self.fea_compression = nn.Sequential(
                nn.Linear(self.pool_dim, self.fea_compre),
                nn.ReLU())
            self.pt_fea_dim = self.fea_compre
        else:
            self.pt_fea_dim = self.pool_dim
    def forward(self, pt_fea, xy_ind):
        # NOTE(review): get_device() returns a CUDA ordinal (-1 on CPU);
        # presumably this always runs on GPU — confirm.
        cur_dev = pt_fea[0].get_device()
        # concate everything
        cat_pt_ind = []
        # for i_batch in range(len(xy_ind)):
        #     cat_pt_ind.append(F.pad(xy_ind[i_batch], (1, 0), 'constant', value=i_batch))
        # Awet Optimized append into list comprehension for faster runtime
        # Prepend the batch index as an extra leading coordinate column.
        cat_pt_ind = [F.pad(xy_ind[i_batch], (1, 0), 'constant', value=i_batch) for i_batch in range(len(xy_ind)) ]
        cat_pt_fea = torch.cat(pt_fea, dim=0)
        cat_pt_ind = torch.cat(cat_pt_ind, dim=0)
        pt_num = cat_pt_ind.shape[0]
        # shuffle the data
        shuffled_ind = torch.randperm(pt_num, device=cur_dev)
        cat_pt_fea = cat_pt_fea[shuffled_ind, :]
        cat_pt_ind = cat_pt_ind[shuffled_ind, :]
        # unique xy grid index
        unq, unq_inv, unq_cnt = torch.unique(cat_pt_ind, return_inverse=True, return_counts=True, dim=0)
        unq = unq.type(torch.int64)
        # process feature
        processed_cat_pt_fea = self.PPmodel(cat_pt_fea)
        # Max-pool the MLP outputs over all points falling in each voxel.
        pooled_data = torch_scatter.scatter_max(processed_cat_pt_fea, unq_inv, dim=0)[0]
        if self.fea_compre:
            processed_pooled_data = self.fea_compression(pooled_data)
        else:
            processed_pooled_data = pooled_data
        return unq, processed_pooled_data
| 2,812 | 30.965909 | 115 | py |
T-Concord3D | T-Concord3D-master/dataloader/dataset_semantickitti.py | # -*- coding:utf-8 -*-
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
from torch.utils import data
import pickle
# Global name -> class registry for dataset wrappers.
REGISTERED_DATASET_CLASSES = {}
def register_dataset(cls, name=None):
    """Class decorator: add *cls* to the dataset registry and return it.

    The key defaults to the class name; registering an existing key is an
    error.
    """
    global REGISTERED_DATASET_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_DATASET_CLASSES, f"exist class: {REGISTERED_DATASET_CLASSES}"
    REGISTERED_DATASET_CLASSES[key] = cls
    return cls
def get_model_class(name):
    """Fetch a dataset class previously added via :func:`register_dataset`."""
    global REGISTERED_DATASET_CLASSES
    assert name in REGISTERED_DATASET_CLASSES, f"available class: {REGISTERED_DATASET_CLASSES}"
    return REGISTERED_DATASET_CLASSES[name]
@register_dataset
class voxel_dataset(data.Dataset):
    """Wraps a point-cloud dataset and voxelises each sample on a regular
    Cartesian grid, returning (voxel_position, voxel_label, grid_index,
    point_labels, point_features, ...) tuples for the segmentation network.
    """
    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, 50, 1.5], min_volume_space=[-50, -50, -3],
                 cut_mix=False):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.flip_aug = flip_aug
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.cut_mix = cut_mix

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        # initialization
        xyz = None
        labels = None
        sig = None
        lcw = None
        ref_st_ind = None
        ref_end_ind = None
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 5:
            xyz, labels, sig, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, lcw, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')
        # Cut-mix augmentation: paste a few stored object instances into the
        # current scene (mirrors cylinder_dataset.get_single_sample).
        if self.cut_mix:
            mix_dir = '/mnt/beegfs/gpu/argoverse-tracking-all-training/WOD/processed/Labeled/cut_mix'
            new_xyz_all = np.load(f"{mix_dir}/pcl.npy")
            new_label_all = np.load(f"{mix_dir}/ss_id.npy")
            unique_obj = np.unique(new_label_all[:, 0])
            sel_obj_rand = np.random.choice(len(unique_obj), 5)
            aug_label = []
            aug_xyz = []
            for obj_id in sel_obj_rand:
                obj_mask = new_label_all[:, 0] == obj_id
                # Bug fix: the original collected only the labels of the
                # selected objects but later concatenated the *whole* stored
                # point cloud, putting points and labels out of sync.
                aug_label.append(new_label_all[obj_mask])
                aug_xyz.append(new_xyz_all[obj_mask])
            new_label = np.concatenate(aug_label, axis=0)
            new_xyz = np.concatenate(aug_xyz, axis=0)
            # perform random flipping and rotation of the pasted points
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                new_xyz[:, 0] = -new_xyz[:, 0]
            elif flip_type == 2:
                new_xyz[:, 1] = -new_xyz[:, 1]
            elif flip_type == 3:
                new_xyz[:, :2] = -new_xyz[:, :2]
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            new_xyz[:, :2] = np.dot(new_xyz[:, :2], j)
            # Bug fix: np.concatenate takes a *sequence* of arrays; the
            # original passed the arrays as separate positional arguments.
            xyz = np.concatenate([xyz, new_xyz[:, :3]], axis=0)
            labels = np.concatenate([labels, new_label[:, 1].reshape(-1, 1)], axis=0)
            if sig is not None:
                sig = np.concatenate([sig, new_xyz[:, 3]], axis=0)
            if lcw is not None:
                lcw = np.concatenate([lcw, np.ones_like(new_label[:, 1]).reshape(-1, 1)], axis=0)
        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        max_bound = np.percentile(xyz, 100, axis=0)
        min_bound = np.percentile(xyz, 0, axis=0)
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any(): print("Zero interval!")
        # Bug fix: `np.int` was removed in NumPy >= 1.24; the builtin int is
        # what the alias pointed to.
        grid_ind = (np.floor((np.clip(xyz, min_bound, max_bound) - min_bound) / intervals)).astype(int)
        # process voxel position
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        # process labels: majority vote per voxel (see nb_process_label)
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        # voxelise the pseudo-label confidence weights the same way
        if len(data) == 6:
            processed_lcw = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
            lcw_voxel_pair = np.concatenate([grid_ind, lcw], axis=1)
            lcw_voxel_pair = lcw_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
            processed_lcw = nb_process_label(np.copy(processed_lcw), lcw_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]),
                                        axis=1)  # np.concatenate((return_xyz, sig), axis=1)#
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        if len(data) == 6:
            data_tuple += (processed_lcw, ref_st_ind, ref_end_ind)
        elif len(data) == 5:
            data_tuple += (ref_st_ind, ref_end_ind)
        return data_tuple
# transformation between Cartesian coordinates and polar coordinates
def cart2polar(input_xyz):
    """Convert (N, 3+) Cartesian points to cylindrical (rho, phi, z)."""
    x = input_xyz[:, 0]
    y = input_xyz[:, 1]
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = np.arctan2(y, x)
    return np.stack((rho, phi, input_xyz[:, 2]), axis=1)
def polar2cat(input_xyz_polar):
    """Inverse of cart2polar for arrays laid out as (3, ...): rows are
    (rho, phi, z); returns rows (x, y, z)."""
    rho = input_xyz_polar[0]
    phi = input_xyz_polar[1]
    return np.stack((rho * np.cos(phi), rho * np.sin(phi), input_xyz_polar[2]), axis=0)
@register_dataset
class cylinder_dataset(data.Dataset):
    """Voxelises point clouds on a cylindrical (rho, phi, z) grid with
    optional rotation / flip / scale / translation / cut-mix augmentation
    and optional test-time augmentation (four flip variants per sample)."""
    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, np.pi, 2], min_volume_space=[0, -np.pi, -4],
                 scale_aug=False,
                 transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi / 4, max_rad=np.pi / 4,
                 cut_mix=False, use_tta=False):
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.scale_aug = scale_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.transform = transform_aug
        self.trans_std = trans_std
        self.cut_mix = cut_mix
        self.use_tta = use_tta
        self.noise_rotation = np.random.uniform(min_rad, max_rad)

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def rotation_points_single_angle(self, points, angle, axis=0):
        # points: [N, 3]; right-multiplies by the transposed rotation matrix.
        rot_sin = np.sin(angle)
        rot_cos = np.cos(angle)
        if axis == 1:
            rot_mat_T = np.array(
                [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],
                dtype=points.dtype)
        elif axis == 2 or axis == -1:
            rot_mat_T = np.array(
                [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
                dtype=points.dtype)
        elif axis == 0:
            rot_mat_T = np.array(
                [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],
                dtype=points.dtype)
        else:
            raise ValueError("axis should in range")
        return points @ rot_mat_T

    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        if self.use_tta:
            # TTA: a tuple of 4 differently-flipped versions of the sample.
            data_total = []
            voting = 4
            for idx in range(voting):
                data_single_ori = self.get_single_sample(data, index, idx)
                data_total.append(data_single_ori)
            data_total = tuple(data_total)
            return data_total
        else:
            data_single = self.get_single_sample(data, index)
            return data_single

    def get_single_sample(self, data, index, vote_idx=0):
        """Augment and voxelise one raw sample; ``vote_idx`` selects the
        deterministic flip variant under TTA."""
        split = self.point_cloud_dataset.imageset
        # initialization
        xyz = None
        labels = None
        sig = None
        lcw = None
        ref_st_ind = None
        ref_end_ind = None
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 5:
            xyz, labels, sig, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, lcw, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')
        # Cut-mix: paste stored object instances onto random road points
        # (label 18 is treated as road here — WOD mapping, presumably).
        if self.cut_mix and ((split == 'train') or (split == 'ssl')):
            mix_dir = '/mnt/beegfs/gpu/argoverse-tracking-all-training/WOD/processed/Labeled/cut_mix'
            new_xyz_all = np.load(f"{mix_dir}/pcl.npy")
            new_label_all = np.load(f"{mix_dir}/ss_id.npy")
            unique_obj = np.unique(new_label_all[:, 0])
            num_object = 10
            sel_obj_rand = np.random.choice(len(unique_obj), num_object)
            aug_label = []
            aug_xyz = []
            for obj_id in sel_obj_rand:
                obj_mask = new_label_all[:, 0] == obj_id
                new_label = new_label_all[obj_mask]
                new_xyz = new_xyz_all[obj_mask]
                # perform random mix/placement on the road
                road_mask = np.squeeze(labels) == 18
                road_pcl = xyz[road_mask]
                mix_pos_rand = np.random.choice(len(road_pcl), 1)
                mix_position = road_pcl[mix_pos_rand, :]
                mix_p_x = mix_position[:, 0] - 0.5
                mix_p_y = mix_position[:, 1] - 0.5
                mix_p_z = mix_position[:, 2]
                # Re-anchor the object so its max-x/max-y/min-z corner sits
                # at the chosen road point.
                new_xyz[:, 0] = new_xyz[:, 0] - np.max(new_xyz[:, 0])
                new_xyz[:, 1] = new_xyz[:, 1] - np.max(new_xyz[:, 1])
                new_xyz[:, 2] = new_xyz[:, 2] - np.min(new_xyz[:, 2])
                new_xyz[:, 0] = new_xyz[:, 0] + mix_p_x
                new_xyz[:, 1] = new_xyz[:, 1] + mix_p_y
                new_xyz[:, 2] = new_xyz[:, 2] + mix_p_z
                if self.use_tta:
                    flip_type = vote_idx
                else:
                    flip_type = np.random.choice(4, 1)
                if flip_type == 1:
                    new_xyz[:, 0] = -new_xyz[:, 0]
                elif flip_type == 2:
                    new_xyz[:, 1] = -new_xyz[:, 1]
                rotate_rad = np.deg2rad(np.random.random() * 360)
                c, s = np.cos(rotate_rad), np.sin(rotate_rad)
                j = np.matrix([[c, s], [-s, c]])
                new_xyz[:, :2] = np.dot(new_xyz[:, :2], j)
                aug_label.append(new_label)
                aug_xyz.append(new_xyz)
                # Duplicate very large objects as x-shifted copies.
                mframe = int(len(new_xyz) / 130000)
                if mframe > 1:
                    for i in range(1, mframe):
                        # Bug fix: the original shifted and appended the same
                        # array object each iteration, so every "copy" aliased
                        # one mutated array; take an explicit copy per shift.
                        new_xyz = new_xyz.copy()
                        new_xyz[:, 0] = new_xyz[:, 0] - i / 2
                        aug_label.append(new_label)
                        aug_xyz.append(new_xyz)
            new_label = np.concatenate(aug_label, axis=0)
            new_xyz = np.concatenate(aug_xyz, axis=0)
            # combine gt data and cut_mix augmentation
            xyz = np.concatenate([xyz, new_xyz[:, :3]], axis=0)
            labels = np.concatenate([labels, new_label[:, 1].reshape(-1, 1)], axis=0)
            if sig is not None:
                sig = np.concatenate([sig.reshape(-1, 1), new_xyz[:, 3].reshape(-1, 1)], axis=0)
                sig = np.squeeze(sig)
            if lcw is not None:
                # Pasted points are ground-truth objects -> full confidence.
                new_lcw = np.ones_like(new_label[:, 1]) * 100
                lcw = np.concatenate([lcw, new_lcw.reshape(-1, 1)], axis=0)
        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 90) - np.pi / 4
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            if self.use_tta:
                flip_type = vote_idx
            else:
                flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]
        # convert coordinate into polar coordinates
        if self.transform:
            noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
                                        np.random.normal(0, self.trans_std[1], 1),
                                        np.random.normal(0, self.trans_std[2], 1)]).T
            xyz[:, 0:3] += noise_translate
        xyz_pol = cart2polar(xyz)
        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any(): print("Zero interval!")
        # Bug fix: `np.int` was removed in NumPy >= 1.24; use the builtin.
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)
        # Voxel labels: majority vote per voxel (see nb_process_label).
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        # Voxelise the pseudo-label confidence weights the same way.
        if len(data) == 6:
            processed_lcw = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
            lcw_voxel_pair = np.concatenate([grid_ind, lcw], axis=1)
            lcw_voxel_pair = lcw_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
            processed_lcw = nb_process_label(np.copy(processed_lcw), lcw_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]),
                                        axis=1)  # np.concatenate((return_xyz, sig), axis=1) #
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        # include reference frame start and end index
        if len(data) == 6:
            data_tuple += (processed_lcw, ref_st_ind, ref_end_ind)
        # include pseudo label confidence weights
        elif len(data) == 5:
            data_tuple += (ref_st_ind, ref_end_ind)
        return data_tuple
@register_dataset
class polar_dataset(data.Dataset):
    """Voxelises point clouds on a cylindrical (rho, phi, z) grid.

    Like ``cylinder_dataset`` but without translation/cut-mix/TTA; supports
    rotation, flip and scale augmentation only.
    """
    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, np.pi, 2], min_volume_space=[0, -np.pi, -4],
                 scale_aug=False):
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.scale_aug = scale_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2:
                sig = np.squeeze(sig)
        elif len(data) == 5:
            xyz, labels, sig, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, lcw, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')
        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 45) - np.pi / 8
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]
        xyz_pol = cart2polar(xyz)
        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any(): print("Zero interval!")
        # Bug fix: `np.int` was removed in NumPy >= 1.24; use the builtin.
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)
        # Voxel labels: majority vote per voxel (see nb_process_label).
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        # Voxelise the pseudo-label confidence weights the same way.
        if len(data) == 6:
            processed_lcw = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
            lcw_voxel_pair = np.concatenate([grid_ind, lcw], axis=1)
            lcw_voxel_pair = lcw_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
            processed_lcw = nb_process_label(np.copy(processed_lcw), lcw_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]),
                                        axis=1)  # np.concatenate((return_xyz, sig), axis=1) #
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        # include pseudo label confidence weights
        if len(data) == 6:
            data_tuple += (processed_lcw, ref_st_ind, ref_end_ind)
        # refrence frame index
        elif len(data) == 5:
            data_tuple += (ref_st_ind, ref_end_ind)
        return data_tuple
# Numba-compiled majority vote per voxel. The (ix, iy, iz, label) rows must be
# pre-sorted so all points of one voxel are contiguous (callers lexsort them).
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_label(processed_label, sorted_label_voxel_pair):
    """Write the most frequent point label of each touched voxel into
    ``processed_label`` (uint8 grid) and return the mutated grid."""
    label_size = 256  # labels fit in uint8, so at most 256 distinct values
    counter = np.zeros((label_size,), dtype=np.uint16)
    counter[sorted_label_voxel_pair[0, 3]] = 1
    cur_sear_ind = sorted_label_voxel_pair[0, :3]
    for i in range(1, sorted_label_voxel_pair.shape[0]):
        cur_ind = sorted_label_voxel_pair[i, :3]
        if not np.all(np.equal(cur_ind, cur_sear_ind)):
            # voxel boundary: commit the majority label, reset the histogram
            processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
            counter = np.zeros((label_size,), dtype=np.uint16)
            cur_sear_ind = cur_ind
        counter[sorted_label_voxel_pair[i, 3]] += 1
    # commit the last (still open) voxel
    processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
    return processed_label
# NOTE(review): this is a near-verbatim copy of nb_process_label with a float32
# histogram. It still increments by 1 per point and takes argmax, i.e. it is a
# majority vote over the (integer-scaled) confidence values, not an average —
# presumably intentional, but verify against how callers use the lcw grid.
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_lcw(processed_label, sorted_label_voxel_pair):
    """Write the most frequent confidence value of each touched voxel into
    ``processed_label`` (uint8 grid) and return the mutated grid."""
    label_size = 256
    counter = np.zeros((label_size,), dtype=np.float32)
    counter[sorted_label_voxel_pair[0, 3]] = 1
    cur_sear_ind = sorted_label_voxel_pair[0, :3]
    for i in range(1, sorted_label_voxel_pair.shape[0]):
        cur_ind = sorted_label_voxel_pair[i, :3]
        if not np.all(np.equal(cur_ind, cur_sear_ind)):
            # voxel boundary: commit the most frequent value, reset histogram
            processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
            counter = np.zeros((label_size,), dtype=np.float32)
            cur_sear_ind = cur_ind
        counter[sorted_label_voxel_pair[i, 3]] += 1
    # commit the last (still open) voxel
    processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
    return processed_label
def collate_fn_BEV(data):
    """Collate a batch of dataset sample tuples for the BEV model.

    Each sample is (voxel_pos, voxel_label, grid_ind, point_label, xyz, ...);
    7-tuples additionally carry (ref_start, ref_end), 8-tuples carry
    (lcw, ref_start, ref_end). Returns stacked torch tensors for the first
    two fields, plain lists for the rest; the three trailing outputs are
    None when the samples do not carry them.
    """
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    # np.int was removed in NumPy 1.24; np.int64 keeps the previous behaviour
    label2stack = np.stack([d[1] for d in data]).astype(np.int64)
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]
    ref_st_index = None
    ref_end_index = None
    lcw2stack = None
    # multi-frame without ssl: samples also carry the start/end index of the
    # reference scan inside the fused cloud
    if len(data[0]) == 7:
        ref_st_index = [d[5] for d in data]
        ref_end_index = [d[6] for d in data]
    # ssl + multi-frame: additionally carries the confidence weights (lcw)
    elif len(data[0]) == 8:
        ref_st_index = [d[6] for d in data]
        ref_end_index = [d[7] for d in data]
        lcw2stack = np.stack([d[5] for d in data]).astype(np.float32)
    return torch.from_numpy(data2stack), torch.from_numpy(
        label2stack), grid_ind_stack, point_label, xyz, ref_st_index, ref_end_index, lcw2stack
def collate_fn_BEV_tta(data):
    """Collate for test-time augmentation.

    ``data`` is a list of lists: one inner list of augmented copies per
    sample; all copies are flattened into a single batch. Output layout
    matches collate_fn_BEV.
    """
    data2stack = np.stack([da2[0] for da1 in data for da2 in da1]).astype(np.float32)
    # np.int was removed in NumPy 1.24; np.int64 keeps the previous behaviour
    label2stack = np.stack([da2[1] for da1 in data for da2 in da1]).astype(np.int64)
    grid_ind_stack = [da2[2] for da1 in data for da2 in da1]
    point_label = [da2[3] for da1 in data for da2 in da1]
    xyz = [da2[4] for da1 in data for da2 in da1]
    ref_st_index = None
    ref_end_index = None
    lcw2stack = None
    # NOTE(review): len(data[0]) here is the number of TTA copies of the first
    # sample, not the per-copy tuple length as in collate_fn_BEV — these
    # branches only fire when there happen to be 7/8 copies. Verify whether
    # len(data[0][0]) was intended.
    if len(data[0]) == 7:
        ref_st_index = [da2[5] for da1 in data for da2 in da1]
        ref_end_index = [da2[6] for da1 in data for da2 in da1]
    elif len(data[0]) == 8:
        ref_st_index = [da2[6] for da1 in data for da2 in da1]
        ref_end_index = [da2[7] for da1 in data for da2 in da1]
        lcw2stack = np.stack([da2[5] for da1 in data for da2 in da1]).astype(np.float32)
    return torch.from_numpy(data2stack), torch.from_numpy(
        label2stack), grid_ind_stack, point_label, xyz, ref_st_index, ref_end_index, lcw2stack
def collate_fn_BEV_test(data):
    """Collate for the test split; also passes through each sample's index (d[5])."""
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    # np.int was removed in NumPy 1.24; np.int64 keeps the previous behaviour
    label2stack = np.stack([d[1] for d in data]).astype(np.int64)
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]
    index = [d[5] for d in data]
    return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz, index
| 29,622 | 40.372905 | 150 | py |
T-Concord3D | T-Concord3D-master/dataloader/augmentations.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
# from __future__ import (
# division,
# absolute_import,
# with_statement,
# print_function,
# unicode_literals,
# )
import random
import numpy as np
import torch
#from pointnet2.data.data_utils import angle_axis
# Utilis
def angle_axis(angle, axis):
    # type: (float, np.ndarray) -> float
    """Build the 3x3 rotation matrix (float torch tensor) for a rotation of
    ``angle`` radians about ``axis``, via Rodrigues' rotation formula.

    Parameters
    ----------
    angle : float
        Angle to rotate by.
    axis : np.ndarray
        Axis to rotate about (need not be unit length).
    """
    unit = axis / np.linalg.norm(axis)
    c = np.cos(angle)
    s = np.sin(angle)
    # skew-symmetric cross-product matrix of the unit axis
    skew = np.array([[0.0, -unit[2], unit[1]],
                     [unit[2], 0.0, -unit[0]],
                     [-unit[1], unit[0], 0.0]])
    rot = c * np.eye(3) + s * skew + (1.0 - c) * np.outer(unit, unit)
    return torch.from_numpy(rot).float()
##################################3
def RandomFlipX(points, v):
    """With probability ``v``, mirror the cloud across the YZ plane (negate x) in place."""
    assert 0 <= v <= 1
    if np.random.random() < v:
        points[:, 0] = -points[:, 0]
    return points
def RandomFlipY(points, v):
    """With probability ``v``, mirror the cloud across the XZ plane (negate y) in place."""
    assert 0 <= v <= 1
    if np.random.random() < v:
        points[:, 1] = -points[:, 1]
    return points
def RandomFlipZ(points, v):
    """With probability ``v``, mirror the cloud across the XY plane (negate z) in place."""
    assert 0 <= v <= 1
    if np.random.random() < v:
        points[:, 2] = -points[:, 2]
    return points
def ScaleX(pts, v):
    """Scale the x coordinate by a random factor drawn uniformly from [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factor = np.random.uniform(low=1 - v, high=1 + v)
    pts[:, 0] = pts[:, 0] * factor
    return pts
def ScaleY(pts, v):
    """Scale the y coordinate by a random factor drawn uniformly from [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factor = np.random.uniform(low=1 - v, high=1 + v)
    pts[:, 1] = pts[:, 1] * factor
    return pts
def ScaleZ(pts, v):
    """Scale the z coordinate by a random factor drawn uniformly from [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factor = np.random.uniform(low=1 - v, high=1 + v)
    pts[:, 2] = pts[:, 2] * factor
    return pts
def Resize(pts, v):
    """Uniformly scale x, y, z by one random factor drawn from [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factor = np.random.uniform(low=1 - v, high=1 + v)
    pts[:, 0:3] = pts[:, 0:3] * factor
    return pts
def NonUniformScale(pts, v):
    """Scale x, y, z by three independent random factors drawn from [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factors = np.random.uniform(low=1 - v, high=1 + v, size=3)
    pts[:, 0:3] = pts[:, 0:3] * torch.from_numpy(factors).float()
    return pts
def RotateX(points, v):
    """Rotate the cloud about the x axis by a random angle in [0, v], with the
    sign of the rotation flipped half of the time.

    If the tensor has more than 3 columns, columns 3+ are rotated too
    (treated as normals) and the tensor is mutated in place; otherwise a new
    rotated tensor is returned.
    """
    assert 0 <= v <= 2 * np.pi
    if np.random.random() > 0.5:
        v = -v
    angle = np.random.uniform() * v
    rot = angle_axis(angle, np.array([1.0, 0.0, 0.0]))
    if points.size(1) <= 3:
        return points @ rot.t()
    points[:, 0:3] = points[:, 0:3] @ rot.t()
    points[:, 3:] = points[:, 3:] @ rot.t()
    return points
def RotateY(points, v):
    """Rotate the cloud about the y axis by a random angle in [0, v], with the
    sign of the rotation flipped half of the time.

    Columns 3+ (if any) are rotated as normals, in place; a 3-column tensor
    gets a new rotated tensor back.
    """
    assert 0 <= v <= 2 * np.pi
    if np.random.random() > 0.5:
        v = -v
    angle = np.random.uniform() * v
    rot = angle_axis(angle, np.array([0.0, 1.0, 0.0]))
    if points.size(1) <= 3:
        return points @ rot.t()
    points[:, 0:3] = points[:, 0:3] @ rot.t()
    points[:, 3:] = points[:, 3:] @ rot.t()
    return points
def RotateZ(points, v):
    """Rotate the cloud about the z axis by a random angle in [0, v], with the
    sign of the rotation flipped half of the time.

    Columns 3+ (if any) are rotated as normals, in place; a 3-column tensor
    gets a new rotated tensor back.
    """
    assert 0 <= v <= 2 * np.pi
    if np.random.random() > 0.5:
        v = -v
    angle = np.random.uniform() * v
    rot = angle_axis(angle, np.array([0.0, 0.0, 1.0]))
    if points.size(1) <= 3:
        return points @ rot.t()
    points[:, 0:3] = points[:, 0:3] @ rot.t()
    points[:, 3:] = points[:, 3:] @ rot.t()
    return points
def RandomAxisRotation(points, v):
    """Rotate the cloud about a uniformly random axis by a random angle in [0, v].

    Columns 3+ (if any) are rotated as normals, in place; a 3-column tensor
    gets a new rotated tensor back.
    """
    assert 0 <= v <= 2 * np.pi
    axis = np.random.randn(3)
    axis /= np.sqrt((axis ** 2).sum())
    angle = np.random.uniform() * v
    rot = angle_axis(angle, axis)
    if points.size(1) <= 3:
        return points @ rot.t()
    points[:, 0:3] = points[:, 0:3] @ rot.t()
    points[:, 3:] = points[:, 3:] @ rot.t()
    return points
def RotatePerturbation(points, v):
    """Apply a small random rotation (about the cloud centroid) to 50*int(v)
    randomly chosen points; per-axis angles are N(0, 0.1*v) clipped to ±0.1*v.

    Columns 3+ of the chosen points (if any) are rotated as normals.
    """
    assert 0 <= v <= 10
    level = int(v)
    sigma = 0.1 * level
    clip = 0.1 * level
    n_sel = 50 * level
    angles = np.clip(sigma * np.random.randn(3), -clip, clip)
    rx = angle_axis(angles[0], np.array([1.0, 0.0, 0.0]))
    ry = angle_axis(angles[1], np.array([0.0, 1.0, 0.0]))
    rz = angle_axis(angles[2], np.array([0.0, 0.0, 1.0]))
    rot = rz @ ry @ rx
    centroid = torch.mean(points[:, 0:3], dim=0)
    chosen = np.random.choice(points.size(0), n_sel)
    offset = points[chosen, 0:3] - centroid
    # move each chosen point by (rotated offset - original offset)
    points[chosen, :3] += (offset @ rot.t()) - offset
    if points.size(1) > 3:
        points[chosen, 3:] = points[chosen, 3:] @ rot.t()
    return points
def Jitter(points, v):
    """Add uniform noise of half-width 0.05*int(v) to 50*int(v) randomly
    chosen points (xyz only), in place."""
    assert 0.0 <= v <= 10
    level = int(v)
    sigma = 0.1 * level
    n_sel = 50 * level
    chosen = np.random.choice(points.size(0), n_sel)
    noise = sigma * (np.random.random([n_sel, 3]) - 0.5)
    points[chosen, 0:3] += torch.from_numpy(noise).float()
    return points
def PointToNoise(points, v):
    """Replace a random fraction ``v`` of the points with uniform noise in a
    unit box centred on the cloud mean, in place."""
    assert 0 <= v <= 0.5
    selected = np.random.random(points.size(0)) < v
    rows = [i for i, flag in enumerate(selected) if flag == True]
    rand_pts = 2 * (np.random.random([len(rows), 3]) - 0.5) + np.mean(points[:, 0:3].numpy(), axis=0)
    points[rows, 0:3] = torch.from_numpy(rand_pts).float()
    return points
def UniformTranslate(points, v):
    """Shift all xyz coordinates by one random scalar offset in [-v, v], in place."""
    assert 0 <= v <= 1
    offset = (2 * np.random.random() - 1) * v
    points[:, 0:3] += offset
    return points
def NonUniformTranslate(points, v):
    """Shift x, y, z by three independent random offsets in [-v, v], in place."""
    assert 0 <= v <= 1
    offsets = (2 * np.random.random(3) - 1) * v
    points[:, 0:3] += torch.from_numpy(offsets).float()
    return points
def RandomDropout(points,v):
assert 0.3 <= v <= 0.875
dropout_rate = v
drop = torch.rand(points.size(0)) < dropout_rate
save_idx = np.random.randint(points.size(0))
points[drop] = points[save_idx]
return points
def RandomErase(points,v):
assert 0 <= v <= 0.5
"v : the radius of erase ball"
random_idx = np.random.randint(points.size(0))
mask = torch.sum((points[:,0:3] - points[random_idx,0:3]).pow(2), dim = 1) < v ** 2
points[mask] = points[random_idx]
return points
#
# def DBSCAN(points,v):
# "https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html"
# assert 0 <= v <= 10
# from sklearn.cluster import DBSCAN
# eps = 0.03 * v
# min_samples = v
# clustering = DBSCAN(eps = eps , min_samples = min_samples).fit(points[:,0:3].numpy())
# for label in set(clustering.labels_):
# mask = (clustering.labels_ == label)
# points[mask,0:3] = torch.mean(points[mask,0:3] , dim = 0)
#
# return points
def ShearXY(points, v):
    """Shear z into x and y with random coefficients in [-v, v], in place."""
    assert 0 <= v <= 0.5
    a, b = v * (2 * np.random.random(2) - 1)
    shear = torch.from_numpy(np.array([[1, 0, 0],
                                       [0, 1, 0],
                                       [a, b, 1]])).float()
    points[:, 0:3] = points[:, 0:3] @ shear.t()
    return points
def ShearYZ(points, v):
    """Shear y and z into x with random coefficients in [-v, v], in place."""
    assert 0 <= v <= 0.5
    b, c = v * (2 * np.random.random(2) - 1)
    shear = torch.from_numpy(np.array([[1, b, c],
                                       [0, 1, 0],
                                       [0, 0, 1]])).float()
    points[:, 0:3] = points[:, 0:3] @ shear.t()
    return points
def ShearXZ(points, v):
    """Shear x and z into y with random coefficients in [-v, v], in place."""
    assert 0 <= v <= 0.5
    a, c = v * (2 * np.random.random(2) - 1)
    shear = torch.from_numpy(np.array([[1, 0, 0],
                                       [a, 1, c],
                                       [0, 0, 1]])).float()
    points[:, 0:3] = points[:, 0:3] @ shear.t()
    return points
def GlobalAffine(points, v):
    """Apply a random affine map (identity plus Gaussian noise of scale ``v``)
    to the xyz coordinates, in place."""
    assert 0 <= v <= 1
    mat = torch.from_numpy(np.eye(3) + np.random.randn(3, 3) * v).float()
    points[:, 0:3] = points[:, 0:3] @ mat.t()
    return points
def Identity(points, v):
    """No-op augmentation: return the input untouched."""
    return points
def augment_list():
    """Return the table of (operation, min_magnitude, max_magnitude) triples
    that RandAugment3D samples from."""
    ops = (
        (Identity, 0, 10),
        (RandomFlipX, 0, 1),
        (RandomFlipY, 0, 1),
        (RandomFlipZ, 0, 1),
        (ScaleX, 0, 0.5),
        (ScaleY, 0, 0.5),
        (ScaleZ, 0, 0.5),
        (NonUniformScale, 0, 0.5),
        (Resize, 0, 0.5),
        (RotateX, 0, 2 * np.pi),
        (RotateY, 0, 2 * np.pi),
        (RotateZ, 0, 2 * np.pi),
        (RandomAxisRotation, 0, 2 * np.pi),
        (RotatePerturbation, 0, 10),
        (Jitter, 0, 10),
        (UniformTranslate, 0, 0.5),
        (NonUniformTranslate, 0, 0.5),
        (RandomDropout, 0.3, 0.875),
        (RandomErase, 0, 0.5),
        (PointToNoise, 0, 0.5),
        (ShearXY, 0, 0.5),
        (ShearYZ, 0, 0.5),
        (ShearXZ, 0, 0.5),
        (GlobalAffine, 0, 0.15),
    )
    return ops
# class RandAugment3D:
# def __init__(self, n, m):
# self.n = n
# self.m = m # [0, 30]
# self.augment_list = augment_list()
#
# def __call__(self, img):
# ops = random.sample(self.augment_list, k=self.n)
# for op in ops:
# # val = (float(self.m) / 30) * float(maxval - minval) + minval
# val = self.m
# img = op(img, val)
#
# return img
class RandAugment3D:
    """RandAugment for point clouds: apply ``n`` randomly chosen augmentations,
    each with its magnitude mapped from the global level ``m`` in [0, 10] into
    that op's own valid range."""

    def __init__(self, n=2, m=10):
        """
        N : number of augmentations sampled per call
        M : global magnitude of augmentation, in [0, 10]
        """
        self.n = n
        self.m = m  # [0, 10]
        self.augment_list = augment_list()
        self.epoch = 0

    def __call__(self, pc):
        """Augment a single cloud (N, C) or a batch (B, N, C) of clouds."""
        assert 0 <= self.m <= 10
        if pc.dim() == 3:
            # batched input: augment each cloud independently
            for i in range(pc.size(0)):
                points = pc[i, :, :]
                ops = random.choices(self.augment_list, k=self.n)
                for op, minval, maxval in ops:
                    # Fix: map the global magnitude into this op's valid range,
                    # as the 2-D branch does. The previous code passed the raw
                    # magnitude, which violated the per-op range assertions
                    # (e.g. ScaleX asserts v <= 0.5) for m > 1.
                    val = (float(self.m) / 10) * float(maxval - minval) + minval
                    points = op(points, val)
                # Fix: some ops (rotations) return a new tensor instead of
                # mutating in place; write the result back into the batch.
                pc[i, :, :] = points
        elif pc.dim() == 2:
            points = pc
            ops = random.choices(self.augment_list, k=self.n)
            for op, minval, maxval in ops:
                val = (float(self.m) / 10) * float(maxval - minval) + minval
                points = op(points, val)
            # Fix: return the augmented tensor, not the (possibly stale) input.
            return points
        return pc

    def UpdateNM(self, increase=True):
        """Randomly bump (increase=True) or drop one of n / m by one step,
        printing the change."""
        N_tmp, M_tmp = self.n, self.m
        if increase:
            if np.random.random() > 0.5:
                self.n += 1
            elif self.m < 10:
                self.m += 1
            print("\n Increase N,M from ({},{}) to ({} ,{}) \n".format(N_tmp, M_tmp, self.n, self.m))
        elif increase == False:
            if np.random.random() > 0.5 and self.n > 1:
                self.n -= 1
            elif self.m > 1:
                self.m -= 1
            print("\n Decrease N,M from ({},{}) to ({} ,{}) \n".format(N_tmp, M_tmp, self.n, self.m))
| 11,680 | 27.079327 | 106 | py |
T-Concord3D | T-Concord3D-master/dataloader/pc_dataset.py | # -*- coding:utf-8 -*-
# author: Xinge
# @file: pc_dataset.py
import glob
import os
import pickle
from os.path import exists
import numpy as np
import yaml
from torch.utils import data
# Registry mapping dataset-class name -> class object (filled by @register_dataset).
REGISTERED_PC_DATASET_CLASSES = {}
# past and future frames global place holders
past = 0  # number of past frames to fuse
future = 0  # number of future frames to fuse
T_past = 0  # past-frame count embedded in pseudo-label file names (predictions_{T_past}_{T_future})
T_future = 0  # future-frame count embedded in pseudo-label file names
ssl = False  # semi-supervised (pseudo-label) mode flag
rgb = False  # presumably toggles RGB features — not read in this chunk; verify against callers
def register_dataset(cls, name=None):
    """Class decorator: record ``cls`` in the global dataset registry.

    The registry key defaults to the class name; registering an existing key
    is an error.
    """
    global REGISTERED_PC_DATASET_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_PC_DATASET_CLASSES, f"exist class: {REGISTERED_PC_DATASET_CLASSES}"
    REGISTERED_PC_DATASET_CLASSES[key] = cls
    return cls
def get_pc_model_class(name):
    """Return the dataset class registered under ``name``."""
    global REGISTERED_PC_DATASET_CLASSES
    registry = REGISTERED_PC_DATASET_CLASSES
    assert name in registry, f"available class: {REGISTERED_PC_DATASET_CLASSES}"
    return registry[name]
@register_dataset
class SemKITTI_demo(data.Dataset):
    """Demo dataset: loads velodyne scans from ``data_path``; in 'val' mode it
    also loads label files from ``demo_label_path``, otherwise labels are all
    zeros."""

    def __init__(self, data_path, imageset='demo',
                 return_ref=True, label_mapping="semantic-wod.yaml", demo_label_path=None):
        with open(label_mapping, 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        self.return_ref = return_ref  # whether to append the intensity channel

        self.im_idx = []
        self.im_idx += absoluteFilePaths(data_path)
        self.label_idx = []
        if self.imageset == 'val':
            print(demo_label_path)
            self.label_idx += absoluteFilePaths(demo_label_path)

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def __getitem__(self, index):
        # each scan is a flat float32 file of (x, y, z, intensity) rows
        raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
        if self.imageset == 'demo':
            # no labels available: use all-zero placeholders
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
        elif self.imageset == 'val':
            annotated_data = np.fromfile(self.label_idx[index], dtype=np.uint32).reshape((-1, 1))
            annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)

        data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
        if self.return_ref:
            data_tuple += (raw_data[:, 3],)
        return data_tuple
@register_dataset
class SemKITTI_sk(data.Dataset):
    """Single-scan SemanticKITTI-style dataset.

    Loads one velodyne scan per item together with its labels. In ssl mode
    (module-level ``ssl`` flag) teacher pseudo-labels and per-point
    label-confidence weights (lcw) are preferred when present on disk.
    """

    def __init__(self, data_path, imageset='train',
                 return_ref=False, label_mapping="semantic-wod.yaml", nusc=None, ssl_data_path=None):
        self.return_ref = return_ref
        with open(label_mapping, 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        if imageset == 'train':
            split = semkittiyaml['split']['train']
        elif imageset == 'val':
            split = semkittiyaml['split']['valid']
        elif imageset == 'test':
            split = semkittiyaml['split']['test']
        else:
            raise Exception('Split must be train/val/test')

        # Fix: the original declared the misspelled global ``T_fture``; the
        # module global actually read below is ``T_future``.
        global past, future, ssl, T_past, T_future
        self.past = past
        self.future = future
        self.T_past = T_past
        self.T_future = T_future

        self.im_idx = []
        for i_folder in split:
            self.im_idx += absoluteFilePaths('/'.join([data_path, str(i_folder).zfill(2), 'velodyne']))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def __getitem__(self, index):
        """Return (xyz, labels[, intensity[, lcw], preceding_len, origin_len])."""
        raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
        origin_len = len(raw_data)
        if self.imageset == 'test':
            # no labels on the test split: all-zero placeholders
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
        else:
            # in ssl mode prefer teacher predictions when they exist on disk
            if ssl and exists(self.im_idx[index].replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[
                              :-3] + 'label'):
                annotated_data = np.fromfile(
                    self.im_idx[index].replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[
                    :-3] + 'label',
                    dtype=np.int32).reshape((-1, 1))
            else:
                annotated_data = np.fromfile(self.im_idx[index].replace('velodyne', 'labels')[:-3] + 'label',
                                             dtype=np.int32).reshape((-1, 1))
            annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            # label-confidence weights: teacher probabilities when present,
            # otherwise weight 1.0 for ground-truth labels
            if ssl and exists(self.im_idx[index].replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                              :-3] + 'label'):
                lcw = np.fromfile(self.im_idx[index].replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                                  :-3] + 'label',
                                  dtype=np.float32).reshape((-1, 1))
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)
            elif ssl:  # in case of GT label give weight = 1.0 per label
                lcw = np.expand_dims(np.ones_like(raw_data[:, 0], dtype=np.float32), axis=1)
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)

        # single-scan dataset: no fused frames precede/follow the scan
        future_frame_len = 0

        annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
        data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
        if self.return_ref and ssl:
            # origin_len marks the length of the target scan (and its lcw)
            data_tuple += (raw_data[:, 3], lcw, future_frame_len, origin_len)
        elif self.return_ref:
            data_tuple += (raw_data[:, 3], future_frame_len, origin_len)
        return data_tuple
@register_dataset
class SemKITTI_nusc(data.Dataset):
    """nuScenes lidarseg dataset exposed through the SemanticKITTI interface.

    Note: ``imageset`` here is the path to a pickled info file (opened in
    binary mode below), not a split name.
    """

    def __init__(self, data_path, imageset='train',
                 return_ref=False, label_mapping="nuscenes.yaml", nusc=None):
        self.return_ref = return_ref

        with open(imageset, 'rb') as f:
            data = pickle.load(f)

        with open(label_mapping, 'r') as stream:
            nuscenesyaml = yaml.safe_load(stream)
        self.learning_map = nuscenesyaml['learning_map']

        self.nusc_infos = data['infos']
        self.data_path = data_path
        self.nusc = nusc  # NuScenes API handle used to resolve label files

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.nusc_infos)

    def __getitem__(self, index):
        info = self.nusc_infos[index]
        # [16:] strips a fixed path prefix from the recorded lidar path —
        # presumably the dataroot portion; verify against the info pickle.
        lidar_path = info['lidar_path'][16:]
        lidar_sd_token = self.nusc.get('sample', info['token'])['data']['LIDAR_TOP']
        lidarseg_labels_filename = os.path.join(self.nusc.dataroot,
                                                self.nusc.get('lidarseg', lidar_sd_token)['filename'])

        points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape([-1, 1])
        points_label = np.vectorize(self.learning_map.__getitem__)(points_label)
        # nuScenes points are (x, y, z, intensity, ring) float32 rows
        points = np.fromfile(os.path.join(self.data_path, lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])

        data_tuple = (points[:, :3], points_label.astype(np.uint8))
        if self.return_ref:
            data_tuple += (points[:, 3],)
        return data_tuple
def absoluteFilePaths(directory):
    """Yield the absolute path of every file under ``directory``, recursively,
    with filenames sorted within each directory."""
    for root, _, names in os.walk(directory):
        names.sort()
        for name in names:
            yield os.path.abspath(os.path.join(root, name))
def SemKITTI2train(label):
    """Convert SemanticKITTI labels to training labels; accepts either one
    array or a list of arrays."""
    if isinstance(label, list):
        return [SemKITTI2train_single(a) for a in label]
    return SemKITTI2train_single(label)
def SemKITTI2train_single(label):
    """Shift all labels down by one and map the ignore class 0 to 255 (mutates
    ``label`` in place and returns it)."""
    ignore_mask = label == 0
    label -= 1
    label[ignore_mask] = 255
    return label
from os.path import join
def transform_pcl_scan(points, pose0, pose):
    """Re-express ``points`` (N x >=3, xyz first) recorded at ``pose`` in the
    local frame of ``pose0``; extra columns (intensity, ...) pass through."""
    homo = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
    # homo @ pose.T, written with an explicit broadcast-and-sum
    world = np.sum(np.expand_dims(homo, 2) * pose.T, axis=1)[:, :3]
    local = world - pose0[:3, 3]
    # local @ pose0[:3, :3], same broadcast-and-sum form
    local = np.sum(np.expand_dims(local, 2) * pose0[:3, :3], axis=1)
    return np.hstack((local, points[:, 3:]))
def fuse_multiscan(ref_raw_data, ref_annotated_data, ref_lcw, transformed_data,
                   transformed_annotated_data, transformed_lcw, source, ssl):
    """Concatenate a transformed neighbouring scan with the reference scan.

    source == -1: the transformed scan is a past frame, prepended before the
    reference; source == +1: a future frame, appended after it. The lcw
    arrays are fused the same way only when ``ssl`` is truthy (otherwise the
    returned lcw is None).

    Returns (raw_data, annotated_data, lcw).
    Raises ValueError for any other ``source``.
    """
    lcw = None
    if source == -1:  # past frame goes first
        raw_data = np.concatenate((transformed_data, ref_raw_data), 0)
        annotated_data = np.concatenate((transformed_annotated_data, ref_annotated_data), 0)
        if ssl:
            lcw = np.concatenate((transformed_lcw, ref_lcw), 0)
    elif source == 1:  # future frame goes last
        raw_data = np.concatenate((ref_raw_data, transformed_data,), 0)
        annotated_data = np.concatenate((ref_annotated_data, transformed_annotated_data), 0)
        if ssl:
            lcw = np.concatenate((ref_lcw, transformed_lcw), 0)
    else:
        # Fix: the original printed an error and returned 0, which made the
        # caller fail later with an opaque unpacking TypeError; fail fast.
        raise ValueError(f"Error data source {source} not Implemented")
    return raw_data, annotated_data, lcw
def get_combined_data(raw_data, annotated_data, lcw, learning_map, return_ref, origin_len, preceding_frame_len, ssl):
    """Map the raw labels through ``learning_map`` and pack the sample tuple.

    Base layout is (xyz, labels); with ``return_ref`` the intensity channel,
    the preceding-frame point count and ``origin_len`` (length of the target
    scan, and of its lcw in ssl mode) are appended, plus ``lcw`` when ``ssl``.
    """
    mapped = np.vectorize(learning_map.__getitem__)(annotated_data)
    data_tuple = (raw_data[:, :3], mapped.astype(np.uint8))
    if return_ref:
        if ssl:
            data_tuple += (raw_data[:, 3], lcw, preceding_frame_len, origin_len)
        else:
            data_tuple += (raw_data[:, 3], preceding_frame_len, origin_len)
    return data_tuple
@register_dataset
class SemKITTI_sk_multiscan(data.Dataset):
    """SemanticKITTI dataset that fuses past and/or future scans with each
    reference scan.

    Neighbouring scans are transformed into the reference scan's frame via
    the per-sequence poses. In ssl mode teacher pseudo-labels and per-point
    label-confidence weights (lcw) are used when present on disk.
    """

    def __init__(self, data_path, imageset='train', return_ref=False, label_mapping="semantic-kitti-multiscan.yaml",
                 train_hypers=None, wod=None, ssl_data_path=None):
        # Fix: the original declared the misspelled global ``T_fture``; the
        # module global is named ``T_future``. (These globals are declared but
        # the class reads its settings from ``train_hypers``.)
        global past, future, ssl, T_past, T_future
        with open(label_mapping, 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.return_ref = return_ref
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        self.data_path = data_path
        self.past = train_hypers['past']
        self.future = train_hypers['future']
        self.T_past = train_hypers['T_past']
        self.T_future = train_hypers['T_future']
        self.ssl = train_hypers['ssl']

        self.im_idx = []
        self.calibrations = []
        self.poses = []  # replaced by a dict in load_calib_poses()
        if imageset == 'train':
            self.split = semkittiyaml['split']['train']
            if self.ssl and (ssl_data_path is not None):
                self.split += semkittiyaml['split']['pseudo']
        elif imageset == 'val':
            self.split = semkittiyaml['split']['valid']
        elif imageset == 'test':
            self.split = semkittiyaml['split']['test']
        elif imageset == 'pseudo':
            self.split = semkittiyaml['split']['pseudo']
        else:
            raise Exception(f'{imageset}: Split must be train/val/test/pseudo')

        # poses are only needed when neighbouring frames are fused
        if self.past or self.future:
            self.load_calib_poses()

        for i_folder in self.split:
            self.im_idx += absoluteFilePaths('/'.join([data_path, str(i_folder).zfill(2), 'velodyne']))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def load_calib_poses(self):
        """Load per-sequence calibration and poses; poses are expressed in the
        velodyne frame (see parse_poses)."""
        self.calibrations = []
        self.poses = {}
        for seq in self.split:
            seq_folder = join(self.data_path, str(seq).zfill(2))
            # Read Calib
            self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt")))
            # Read poses
            poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1])
            self.poses[seq] = [pose.astype(np.float32) for pose in poses_f64]

    def parse_calibration(self, filename):
        """ read calibration file with given filename

            Returns
            -------
            dict
                Calibration matrices as 4x4 numpy arrays.
        """
        calib = {}
        calib_file = open(filename)
        for line in calib_file:
            key, content = line.strip().split(":")
            values = [float(v) for v in content.strip().split()]

            # each calib line holds the top 3x4 of a homogeneous transform
            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0

            calib[key] = pose
        calib_file.close()
        return calib

    def parse_poses(self, filename, calibration):
        """ read poses file with per-scan poses from given filename

            Returns
            -------
            list
                list of poses as 4x4 numpy arrays.
        """
        file = open(filename)
        poses = []

        Tr = calibration["Tr"]
        Tr_inv = np.linalg.inv(Tr)

        for line in file:
            values = [float(v) for v in line.strip().split()]

            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0

            # express the pose in the velodyne frame via the Tr calibration
            poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))

        return poses

    def get_semantickitti_data(self, newpath, time_frame_idx):
        """Load one scan: returns (points, labels, point_count, lcw).

        Labels are zeros on the test/pseudo splits; in ssl mode teacher
        predictions/probabilities are preferred when they exist on disk,
        otherwise ground-truth labels get confidence weight 1.0.
        """
        raw_data = np.fromfile(newpath, dtype=np.float32).reshape((-1, 4))
        lcw = None
        if self.imageset == 'test' or self.imageset == 'pseudo':
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
        else:
            if self.ssl and exists(newpath.replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[:-3]
                                   + 'label'):
                # NOTE(review): pseudo labels are read as int64 here but as
                # int32 in SemKITTI_sk — verify which dtype the teacher writes.
                annotated_data = np.fromfile(
                    newpath.replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[:-3] + 'label',
                    dtype=np.int64).reshape((-1, 1))
            else:
                annotated_data = np.fromfile(newpath.replace('velodyne', 'labels')[:-3] + 'label',
                                             dtype=np.int32).reshape((-1, 1))
                annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            if self.ssl and exists(newpath.replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                                   :-3] + 'label'):
                lcw = np.fromfile(
                    newpath.replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                    :-3] + 'label',
                    dtype=np.float32).reshape((-1, 1))
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)
            elif self.ssl:  # in case of GT label give weight = 1.0 per label
                lcw = np.expand_dims(np.ones_like(raw_data[:, 0], dtype=np.float32), axis=1)
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)

        return raw_data, annotated_data, len(raw_data), lcw

    def __getitem__(self, index):
        """Build one fused sample around the reference scan at ``index``."""
        reference_file = self.im_idx[index]
        raw_data, annotated_data, data_len, lcw = self.get_semantickitti_data(reference_file, 0)
        origin_len = data_len

        # frame number and sequence id parsed from the scan's file path
        number_idx = int(self.im_idx[index][-10:-4])
        dir_idx = self.im_idx[index].split('/')[-3]

        # fuse past scans (prepended before the reference points); skipped
        # unless the window fits on both sides of the sequence
        past_frame_len = 0
        if self.past and ((number_idx - self.past) >= 0) and ((number_idx + self.past) < len(self.poses[dir_idx])):
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.past):
                frame_ind = fuse_idx + 1
                pose = self.poses[dir_idx][number_idx - frame_ind]
                past_file = self.im_idx[index][:-10] + str(number_idx - frame_ind).zfill(6) + self.im_idx[index][-4:]
                past_raw_data, past_annotated_data, past_data_len, past_lcw = self.get_semantickitti_data(past_file,
                                                                                                          -frame_ind)
                past_raw_data = transform_pcl_scan(past_raw_data, pose0, pose)
                if past_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   past_raw_data, past_annotated_data, past_lcw, -1,
                                                                   self.ssl)
                # count number of past frame points
                past_frame_len += past_data_len

        # fuse future scans (appended after the reference points)
        future_frame_len = 0
        if self.future and ((number_idx - self.future) >= 0) and (
                (number_idx + self.future) < len(self.poses[dir_idx])):
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.future):
                frame_ind = fuse_idx + 1
                future_pose = self.poses[dir_idx][number_idx + frame_ind]
                future_file = self.im_idx[index][:-10] + str(number_idx + frame_ind).zfill(6) + self.im_idx[index][-4:]
                future_raw_data, future_annotated_data, future_data_len, future_lcw = self.get_semantickitti_data(
                    future_file, frame_ind)
                future_raw_data = transform_pcl_scan(future_raw_data, pose0, future_pose)
                if future_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   future_raw_data, future_annotated_data, future_lcw,
                                                                   1, self.ssl)
                # count number of future frame points
                future_frame_len += future_data_len

        # map labels and pack the sample tuple
        data_tuple = get_combined_data(raw_data, annotated_data, lcw, self.learning_map, self.return_ref,
                                       origin_len, past_frame_len, self.ssl)
        return data_tuple
# WOD -------------------------------------------------------------
# def fuse_multi_scan(points, pose0, pose):
# # pose = poses[0][idx]
#
# hpoints = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
# # new_points = hpoints.dot(pose.T)
# new_points = np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)
#
# new_points = new_points[:, :3]
# new_coords = new_points - pose0[:3, 3]
# # new_coords = new_coords.dot(pose0[:3, :3])
# new_coords = np.sum(np.expand_dims(new_coords, 2) * pose0[:3, :3], axis=1)
# new_coords = np.hstack((new_coords, points[:, 3:]))
#
# return new_coords
@register_dataset
class WOD_multiscan(data.Dataset):
    """Waymo Open Dataset (WOD) loader that fuses past and/or future lidar
    scans into the reference scan's coordinate frame (multi-scan fusion).

    Optionally consumes teacher pseudo-labels/probabilities when ``ssl`` is
    enabled, and camera-projected gray values when ``rgb`` is enabled.
    """

    def __init__(self, data_path, imageset='train', return_ref=False, label_mapping="wod-multiscan_labelled.yaml",
                 train_hypers=None, wod=None, ssl_data_path=None):
        # NOTE(review): these globals are declared but never assigned here, and
        # 'T_fture' looks like a typo for 'T_future' — confirm before cleanup.
        global past, future, ssl, T_past, T_fture, rgb
        self.return_ref = return_ref
        with open(label_mapping, 'r') as stream:
            wodyaml = yaml.safe_load(stream)
        self.learning_map = wodyaml['learning_map']
        self.imageset = imageset
        self.data_path = data_path
        # numbers of past/future frames to fuse around the reference scan
        self.past = train_hypers['past']
        self.future = train_hypers['future']
        # teacher model's past/future configuration (used to locate its
        # prediction/probability folders on disk)
        self.T_past = train_hypers['T_past']
        self.T_future = train_hypers['T_future']
        self.rgb = train_hypers['rgb']
        self.ssl = train_hypers['ssl']
        # self.use_time = train_hypers['time']  # Use time instead of intensity
        # self.UDA = train_hypers['uda']
        self.im_idx = []        # flat list of lidar file paths across all sequences
        self.calibrations = []
        # self.times = []
        self.poses = {}         # per-sequence list of 4x4 ego poses
        if imageset == 'train':
            self.split = wodyaml['split']['train']
            # self.sensor_zpose = train_hypers["S_sensor_zpose"]
            # semi-supervised training additionally consumes pseudo-labelled sequences
            if self.ssl and (ssl_data_path is not None):
                self.split += wodyaml['split']['pseudo']
        elif imageset == 'val':
            self.split = wodyaml['split']['valid']
            # self.sensor_zpose = train_hypers["S_sensor_zpose"]
        elif imageset == 'test':
            self.split = wodyaml['split']['test']
            # self.sensor_zpose = train_hypers["S_sensor_zpose"]
        elif imageset == 'pseudo':
            self.split = wodyaml['split']['pseudo']
            # self.sensor_zpose = train_hypers["T_sensor_zpose"]
        else:
            raise Exception(f'{imageset}: Split must be train/val/test/pseudo')
        # self.split = sorted(os.listdir(self.data_path))
        # self.training_len = len(self.split)
        # self.ssl_data_path = ssl_data_path  # '/mnt/beegfs/gpu/argoverse-tracking-all-training/WOD/processed/Unlabeled/testing'
        # xx = self.data_path.split("/")[-1]
        # if ssl and self.data_path.split("/")[-1] == "training" and self.ssl_data_path:
        #     self.training_len = len(sorted(os.listdir(self.data_path)))
        #     self.split = sorted(os.listdir(self.data_path)) + sorted(os.listdir(self.ssl_data_path))
        # print(len(self.split))
        # TODO: remove after search experiment
        # self.split = self.split[150:]
        # poses are only needed when temporal fusion is active
        if self.past or self.future:
            self.load_calib_poses()
        # for c, i_folder in enumerate(self.split):
        #     if ssl and (self.data_path.split("/")[-1] == "training") and (
        #             c >= self.training_len):  # 789 number of training folders
        #         self.im_idx += absoluteFilePaths('/'.join([self.ssl_data_path, str(i_folder), 'lidar']))
        #     else:
        #         self.im_idx += absoluteFilePaths('/'.join([self.data_path, str(i_folder), 'lidar']))
        for c, i_folder in enumerate(self.split):
            self.im_idx += absoluteFilePaths('/'.join([self.data_path, str(i_folder), 'lidar']))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def load_calib_poses(self):
        """
        load calib poses and times.
        """
        ###########
        # Load data
        ###########
        self.calibrations = []
        # self.times = []
        self.poses = {}  # []
        for k, seq in enumerate(self.split):  # range(0, 22):
            # if ssl and (self.data_path.split("/")[-1] == "training") and (
            #         k >= self.training_len):  # 789 number of training folders
            #     seq_folder = join(self.ssl_data_path, str(seq))
            # else:
            seq_folder = join(self.data_path, str(seq))
            # Read poses
            poses_f64 = self.parse_poses(seq_folder, k)
            # self.poses.append([pose.astype(np.float32) for pose in poses_f64])
            # keyed by sequence name so __getitem__ can look poses up by folder
            self.poses[seq] = [pose.astype(np.float32) for pose in poses_f64]

    def parse_poses(self, seq, k):
        """ read poses file with per-scan poses from given filename

            Returns
            -------
            list
                list of poses as 4x4 numpy arrays.
        """
        # WOD stores one .npy pose file per scan; sorted() keeps frame order
        filename = sorted(glob.glob(os.path.join(seq, "poses", "*.npy")))
        poses = []
        for file in filename:
            pose = np.load(file)
            poses.append(pose)
        return poses

    def get_wod_data(self, newpath, time_frame_idx):
        """Load one scan: returns (points, labels, point_count, label_weights).

        `time_frame_idx` is the signed temporal offset of the scan relative to
        the reference frame (negative = past, positive = future).
        """
        raw_data = np.load(newpath)
        # if self.use_time:
        #     raw_data[:, 3] = np.ones_like(raw_data[:, 3]) * time_frame_idx
        # if self.UDA:
        #     raw_data[:, 2] += self.sensor_zpose  # elevate the point cloud two meters up to align with WOD
        # TODO: check if the colors are encoded correctly instead of the lidar intensity
        if self.rgb:
            # load rgb colors for each points
            raw_rgb = np.load(newpath.replace('lidar', 'colors')[
                              :-3] + 'npy')
            # convert rgb into gray scale [0, 255]
            raw_gray = 0.2989 * raw_rgb[:, 0] + 0.5870 * raw_rgb[:, 1] + 0.1140 * raw_rgb[:, 2]
            # mask (0) ignored point colors (originally not provided on wod rear-cameras) -> rgb:[1,1,1] or gray:[
            # 0.99990])
            gray_mask = raw_gray > 1  # < 1 #0.9998999999999999
            # assign -1 to the points whose color we want to mask out
            raw_gray[gray_mask] = -1
            # replace intensity with gray scale camera image/frame color
            raw_data[:, 3] = raw_gray
            # raw_data[:,4] = gray_mask * 1
        lcw = None  # per-point label confidence weights (SSL only)
        origin_len = len(raw_data)
        if self.imageset == 'test' or self.imageset == 'pseudo':
            # no ground truth: emit all-zero labels of matching length
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0]), axis=1).reshape((-1, 1))
        else:
            # x = self.im_idx[index].replace('lidar', f"predictions_{self.T_past}_{self.T_future}")[:-3] + 'label'
            # prefer teacher pseudo-labels when SSL is on and they exist on disk
            if self.ssl and exists(newpath.replace('lidar', f"predictions_{self.T_past}_{self.T_future}")[
                                   :-3] + 'npy'):
                annotated_data = np.load(
                    newpath.replace('lidar', f"predictions_{self.T_past}_{self.T_future}")[
                    :-3] + 'npy').reshape((-1, 1))
            else:
                # print(self.im_idx[index].replace('lidar', 'labels')[:-3] + 'npy')
                annotated_data = np.load(newpath.replace('lidar', 'labels')[:-3] + 'npy',
                                         allow_pickle=True)
                if len(annotated_data.shape) == 2:
                    if annotated_data.shape[1] == 2:
                        # second column carries the semantic label
                        annotated_data = annotated_data[:, 1]
                # Reshape the label/annotation to vector.
                annotated_data = annotated_data.reshape((-1, 1))
                # keep only the semantic part of the label (low 16 bits)
                annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            if self.ssl and exists(newpath.replace('lidar', f"probability_{self.T_past}_{self.T_future}")[
                                   :-3] + 'npy'):
                # teacher confidence in [0, 1], stored as int percentage
                lcw = np.load(newpath.replace('lidar', f"probability_{self.T_past}_{self.T_future}")[
                              :-3] + 'npy').reshape((-1, 1))
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)
            elif self.ssl:  # in case of GT label give weight = 1.0 per label
                lcw = np.expand_dims(np.ones_like(raw_data[:, 0]), axis=1)
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)
        return raw_data, annotated_data, len(raw_data), lcw

    def __getitem__(self, index):
        # reference scan
        reference_file = self.im_idx[index]
        raw_data, annotated_data, data_len, lcw = self.get_wod_data(reference_file, 0)
        origin_len = data_len
        # frame number and sequence folder parsed from the file path
        number_idx = int(self.im_idx[index][-10:-4])
        # dir_idx = int(self.im_idx[index][-22:-20])
        dir_idx = self.im_idx[index].split('/')[-3]
        # past scan
        past_frame_len = 0
        # NOTE(review): the upper-bound check also constrains past fusion by
        # number_idx + self.past — presumably to keep train/eval windows
        # symmetric; confirm this is intended (TODO in upstream as well).
        if self.past and ((number_idx - self.past) >= 0) and ((number_idx + self.past) < len(self.poses[dir_idx])):
            # extract the poss of the reference frame
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.past):
                # TODO: past frames
                frame_ind = fuse_idx + 1
                pose = self.poses[dir_idx][number_idx - frame_ind]
                past_file = self.im_idx[index][:-10] + str(number_idx - frame_ind).zfill(6) + self.im_idx[index][-4:]
                past_raw_data, past_annotated_data, past_data_len, past_lcw = self.get_wod_data(past_file, -frame_ind)
                # transform the past frame into reference frame coordinate system
                past_raw_data = transform_pcl_scan(past_raw_data, pose0, pose)
                # past frames
                if past_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   past_raw_data, past_annotated_data, past_lcw, -1,
                                                                   self.ssl)
                    # count number of past frame points
                    past_frame_len += past_data_len
        # future scan
        future_frame_len = 0
        # TODO: added the future frame availability check
        if self.future and ((number_idx - self.future) >= 0) and (
                (number_idx + self.future) < len(self.poses[dir_idx])):
            # extract the poss of the reference frame
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.future):
                # TODO: future frame
                frame_ind = fuse_idx + 1
                future_pose = self.poses[dir_idx][number_idx + frame_ind]
                future_file = self.im_idx[index][:-10] + str(number_idx + frame_ind).zfill(6) + self.im_idx[index][-4:]
                future_raw_data, future_annotated_data, future_data_len, future_lcw = self.get_wod_data(future_file,
                                                                                                        frame_ind)
                # transform the future frame into reference frame coordinate system
                future_raw_data = transform_pcl_scan(future_raw_data, pose0, future_pose)
                # TODO: check correctness (future frame)
                if future_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   future_raw_data, future_annotated_data, future_lcw,
                                                                   1, self.ssl)
                    # count number of future frame points
                    future_frame_len += future_data_len
        # extract compiled data_tuple
        data_tuple = get_combined_data(raw_data, annotated_data, lcw, self.learning_map, self.return_ref,
                                       origin_len, past_frame_len, self.ssl)
        return data_tuple
# load label class info
def get_label_name(label_mapping):
    """Build {learned_class_id: class_name} from a label-mapping YAML file."""
    with open(label_mapping, 'r') as stream:
        cfg = yaml.safe_load(stream)
    # Walk raw ids from highest to lowest so that, on collisions, the lowest
    # raw id is written last and therefore wins.
    mapping = {}
    for raw_id in sorted(cfg['learning_map'])[::-1]:
        mapping[cfg['learning_map'][raw_id]] = cfg['labels'][raw_id]
    return mapping
def get_label_inv_name(label_inv_mapping):
    """Load the inverse learning map (learned id -> original label id) from YAML."""
    with open(label_inv_mapping, 'r') as stream:
        cfg = yaml.safe_load(stream)
    return cfg['learning_map_inv']
def get_nuScenes_label_name(label_mapping):
    """Build {learned_class_id: name} for nuScenes from its 16-class label YAML."""
    with open(label_mapping, 'r') as stream:
        cfg = yaml.safe_load(stream)
    label_name = dict()
    # iterate raw ids high-to-low, as in the other label loaders
    for raw_id in reversed(sorted(cfg['learning_map'])):
        learned_id = cfg['learning_map'][raw_id]
        label_name[learned_id] = cfg['labels_16'][learned_id]
    return label_name
def update_config(configs):
    """Publish selected 'train_params' entries as module-level globals."""
    global past, future, T_past, T_future, ssl, rgb
    hp = configs['train_params']
    past, future = hp['past'], hp['future']
    T_past, T_future = hp['T_past'], hp['T_future']
    ssl, rgb = hp['ssl'], hp['rgb']
T-Concord3D | T-Concord3D-master/dataloader/dataset_nuscenes.py | # -*- coding:utf-8 -*-
# author: Xinge
# @file: dataset_nuscenes.py
import numpy as np
import torch
import numba as nb
from torch.utils import data
from dataloader.dataset_semantickitti import register_dataset
def cart2polar(input_xyz):
    """Convert (N, 3) cartesian points to cylindrical (rho, phi, z) columns."""
    x, y = input_xyz[:, 0], input_xyz[:, 1]
    radius = np.sqrt(x * x + y * y)
    azimuth = np.arctan2(y, x)
    return np.stack((radius, azimuth, input_xyz[:, 2]), axis=1)
def polar2cat(input_xyz_polar):
    """Convert stacked polar channels (rho, phi, z) back to cartesian (x, y, z)."""
    rho, phi, z = input_xyz_polar[0], input_xyz_polar[1], input_xyz_polar[2]
    return np.stack((rho * np.cos(phi), rho * np.sin(phi), z), axis=0)
@register_dataset
class cylinder_dataset_nuscenes(data.Dataset):
    """Voxelizes nuScenes point clouds on a cylindrical (rho, phi, z) grid.

    Wraps an underlying point-cloud dataset and returns, per sample:
    (voxel_position, processed_label, grid_ind, labels, return_fea[, index]),
    applying optional rotation / flip / scale / translation augmentation.
    """

    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=0, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, np.pi, 3], min_volume_space=[0, -np.pi, -5],
                 scale_aug=False, transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi / 4, max_rad=np.pi / 4):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)  # voxel counts per (rho, phi, z) axis
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.scale_aug = scale_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.transform = transform_aug
        self.trans_std = trans_std          # per-axis std of the random translation
        self.noise_rotation = np.random.uniform(min_rad, max_rad)

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def __getitem__(self, index):
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')

        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360) - np.pi
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            # np.array instead of the pending-deprecation np.matrix; the dot
            # product and in-place assignment are numerically identical.
            j = np.array([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)

        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]
        # random translation jitter (gaussian, per-axis std from trans_std)
        if self.transform:
            noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
                                        np.random.normal(0, self.trans_std[1], 1),
                                        np.random.normal(0, self.trans_std[2], 1)]).T
            xyz[:, 0:3] += noise_translate

        # convert coordinate into polar coordinates
        xyz_pol = cart2polar(xyz)

        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)

        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)

        if (intervals == 0).any(): print("Zero interval!")

        # astype(int) — `np.int` was removed in NumPy >= 1.24; the builtin int
        # is the exact alias it used to be.
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)

        # compute cartesian centers of every voxel (for position features)
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)

        # process labels: majority vote per voxel (see nb_process_label)
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)

        data_tuple = (voxel_position, processed_label)

        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)

        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) == 3:
            # append the per-point signal (e.g. intensity) as the last feature
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)

        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        return data_tuple
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_label(processed_label, sorted_label_voxel_pair):
    """Majority-vote label per voxel.

    Rows of `sorted_label_voxel_pair` are (gx, gy, gz, label), pre-sorted so
    equal voxel coordinates are contiguous; the argmax of each voxel's vote
    histogram is written into `processed_label`.
    """
    n_labels = 256
    votes = np.zeros((n_labels,), dtype=np.uint16)
    votes[sorted_label_voxel_pair[0, 3]] = 1
    cur_voxel = sorted_label_voxel_pair[0, :3]
    for row in range(1, sorted_label_voxel_pair.shape[0]):
        voxel = sorted_label_voxel_pair[row, :3]
        if not np.all(np.equal(voxel, cur_voxel)):
            # voxel changed: flush the finished voxel's winning label
            processed_label[cur_voxel[0], cur_voxel[1], cur_voxel[2]] = np.argmax(votes)
            votes = np.zeros((n_labels,), dtype=np.uint16)
            cur_voxel = voxel
        votes[sorted_label_voxel_pair[row, 3]] += 1
    # flush the final voxel
    processed_label[cur_voxel[0], cur_voxel[1], cur_voxel[2]] = np.argmax(votes)
    return processed_label
def collate_fn_BEV(data):
    """Collate a batch of dataset tuples into BEV training inputs.

    Each element of `data` is (voxel_position, processed_label, grid_ind,
    point_label, xyz); positions and labels are stacked into tensors while
    the per-sample point-level items stay as Python lists.
    """
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    # astype(int): `np.int` was removed in NumPy >= 1.24 (it was just an
    # alias for the builtin int), so using it raises AttributeError.
    label2stack = np.stack([d[1] for d in data]).astype(int)
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]
    return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz
# SemKITTI_label_name = {0: 'noise',
# 1: 'animal',
# 2: 'human.pedestrian.adult',
# 3: 'human.pedestrian.child',
# 4: 'human.pedestrian.construction_worker',
# 5: 'human.pedestrian.personal_mobility',
# 6: 'human.pedestrian.police_officer',
# 7: 'human.pedestrian.stroller',
# 8: 'human.pedestrian.wheelchair',
# 9: 'movable_object.barrier',
# 10: 'movable_object.debris',
# 11: 'movable_object.pushable_pullable',
# 12: 'movable_object.trafficcone',
# 13: 'static_object.bicycle_rack',
# 14: 'vehicle.bicycle',
# 15: 'vehicle.bus.bendy',
# 16: 'vehicle.bus.rigid',
# 17: 'vehicle.car',
# 18: 'vehicle.construction',
# 19: 'vehicle.emergency.ambulance',
# 20: 'vehicle.emergency.police',
# 21: 'vehicle.motorcycle',
# 22: 'vehicle.trailer',
# 23: 'vehicle.truck',
# 24: 'flat.driveable_surface',
# 25: 'flat.other',
# 26: 'flat.sidewalk',
# 27: 'flat.terrain',
# 28: 'static.manmade',
# 29: 'static.other',
# 30: 'static.vegetation',
# 31: 'vehicle.ego'
# }
#
# SemKITTI_label_name_16 = {
# 0: 'noise',
# 1: 'barrier',
# 2: 'bicycle',
# 3: 'bus',
# 4: 'car',
# 5: 'construction_vehicle',
# 6: 'motorcycle',
# 7: 'pedestrian',
# 8: 'traffic_cone',
# 9: 'trailer',
# 10: 'truck',
# 11: 'driveable_surface',
# 12: 'other_flat',
# 13: 'sidewalk',
# 14: 'terrain',
# 15: 'manmade',
# 16: 'vegetation',
# }
#
# labels_mapping = {
# 1: 0,
# 5: 0,
# 7: 0,
# 8: 0,
# 10: 0,
# 11: 0,
# 13: 0,
# 19: 0,
# 20: 0,
# 0: 0,
# 29: 0,
# 31: 0,
# 9: 1,
# 14: 2,
# 15: 3,
# 16: 3,
# 17: 4,
# 18: 5,
# 21: 6,
# 2: 7,
# 3: 7,
# 4: 7,
# 6: 7,
# 12: 8,
# 22: 9,
# 23: 10,
# 24: 11,
# 25: 12,
# 26: 13,
# 27: 14,
# 28: 15,
# 30: 16
# }
| 9,266 | 35.920319 | 114 | py |
T-Concord3D | T-Concord3D-master/dataloader/__init__.py | # -*- coding:utf-8 -*-
# author: Xinge
# @file: __init__.py.py
from . import dataset_nuscenes
| 95 | 15 | 30 | py |
T-Concord3D | T-Concord3D-master/dataloader/preprocess.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import torch
import torchvision.transforms as transforms
from augmentations import RandAugment3D
def preprocessing(point_set, cls):
    """Apply 3D RandAugment to `point_set` and return (tensor, label)."""
    # Compose the single-augmentation pipeline directly (equivalent to
    # building an empty Compose and inserting at position 0).
    augment = transforms.Compose([RandAugment3D(2, 2)])
    return torch.from_numpy(augment(point_set)), cls
| 397 | 23.875 | 59 | py |
darts | darts-master/cnn/test.py | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line interface for evaluating a trained DARTS model on CIFAR-10.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()

# Timestamped logging to stdout.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')

CIFAR_CLASSES = 10  # CIFAR-10 class count
def main():
  """Load a pretrained DARTS genotype model and report CIFAR-10 test accuracy."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  # Seed numpy and torch (CPU + CUDA) for reproducible evaluation.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # Resolve the architecture genotype by name from genotypes.py.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  utils.load(model, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()

  # Only the (deterministic) test transform is used for evaluation.
  _, test_transform = utils._data_transforms_cifar10(args)
  test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)

  test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  # Fixed drop-path probability at evaluation time.
  model.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(test_queue, model, criterion)
  logging.info('test_acc %f', test_acc)
def infer(test_queue, model, criterion):
  """Evaluate `model` on `test_queue`; returns (top-1 accuracy, mean loss)."""
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()

  # `Variable(..., volatile=True)` and `.cuda(async=True)` are broken on
  # modern Python/PyTorch (`async` is a keyword since 3.7); the replacements
  # are torch.no_grad() and the `non_blocking` argument.
  with torch.no_grad():
    for step, (input, target) in enumerate(test_queue):
      input = input.cuda()
      target = target.cuda(non_blocking=True)

      logits, _ = model(input)
      loss = criterion(logits, target)

      prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
      n = input.size(0)
      # 0-dim tensor indexing (`.data[0]`) raises in modern PyTorch; use .item().
      objs.update(loss.item(), n)
      top1.update(prec1.item(), n)
      top5.update(prec5.item(), n)

      if step % args.report_freq == 0:
        logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

  return top1.avg, objs.avg
# Script entry point: run evaluation when invoked directly.
if __name__ == '__main__':
  main()
| 3,593 | 33.228571 | 102 | py |
darts | darts-master/cnn/architect.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
class Architect(object):
  """Performs the architecture (alpha) update of DARTS.

  Supports both the first-order approximation (gradient of the validation
  loss w.r.t. alpha) and the unrolled second-order bilevel gradient with a
  finite-difference Hessian-vector product, as in the DARTS paper.
  """

  def __init__(self, model, args):
    self.network_momentum = args.momentum
    self.network_weight_decay = args.weight_decay
    self.model = model
    # Adam over the architecture parameters only (not the network weights).
    self.optimizer = torch.optim.Adam(self.model.arch_parameters(),
        lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)

  def _compute_unrolled_model(self, input, target, eta, network_optimizer):
    """Build a model copy after one simulated SGD step: w' = w - eta * dw."""
    loss = self.model._loss(input, target)
    theta = _concat(self.model.parameters()).data
    try:
      # Reuse the optimizer's momentum buffers to mimic SGD-with-momentum.
      moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(self.network_momentum)
    except:
      # Broad except is intentional: on the first step there are no momentum
      # buffers yet, so fall back to zero momentum.
      moment = torch.zeros_like(theta)
    dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay*theta
    # NOTE(review): the two-argument overload theta.sub(eta, x) (= theta -
    # eta*x) is deprecated/removed in newer PyTorch — confirm torch version.
    unrolled_model = self._construct_model_from_theta(theta.sub(eta, moment+dtheta))
    return unrolled_model

  def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):
    """One alpha-optimizer step, using the second-order path when `unrolled`."""
    self.optimizer.zero_grad()
    if unrolled:
        self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer)
    else:
        self._backward_step(input_valid, target_valid)
    self.optimizer.step()

  def _backward_step(self, input_valid, target_valid):
    # First-order: plain validation-loss gradient w.r.t. alpha.
    loss = self.model._loss(input_valid, target_valid)
    loss.backward()

  def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):
    """Second-order: backprop the validation loss through one virtual weight step."""
    unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer)
    unrolled_loss = unrolled_model._loss(input_valid, target_valid)

    unrolled_loss.backward()
    dalpha = [v.grad for v in unrolled_model.arch_parameters()]
    vector = [v.grad.data for v in unrolled_model.parameters()]
    # Finite-difference approximation of the Hessian-vector correction term.
    implicit_grads = self._hessian_vector_product(vector, input_train, target_train)

    for g, ig in zip(dalpha, implicit_grads):
      g.data.sub_(eta, ig.data)

    # Copy the assembled gradients onto the live model's alpha parameters.
    for v, g in zip(self.model.arch_parameters(), dalpha):
      if v.grad is None:
        v.grad = Variable(g.data)
      else:
        v.grad.data.copy_(g.data)

  def _construct_model_from_theta(self, theta):
    """Instantiate a fresh model whose weights are the flat vector `theta`."""
    model_new = self.model.new()
    model_dict = self.model.state_dict()

    # Slice the flat vector back into named parameter tensors.
    params, offset = {}, 0
    for k, v in self.model.named_parameters():
      v_length = np.prod(v.size())
      params[k] = theta[offset: offset+v_length].view(v.size())
      offset += v_length

    assert offset == len(theta)
    model_dict.update(params)
    model_new.load_state_dict(model_dict)
    return model_new.cuda()

  def _hessian_vector_product(self, vector, input, target, r=1e-2):
    """Central finite difference of grad_alpha(loss) along `vector` in weight space."""
    R = r / _concat(vector).norm()
    # perturb weights to w + R*v
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(R, v)
    loss = self.model._loss(input, target)
    grads_p = torch.autograd.grad(loss, self.model.arch_parameters())

    # perturb to w - R*v
    for p, v in zip(self.model.parameters(), vector):
      p.data.sub_(2*R, v)
    loss = self.model._loss(input, target)
    grads_n = torch.autograd.grad(loss, self.model.arch_parameters())

    # restore original weights
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(R, v)

    return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
| 3,429 | 35.88172 | 130 | py |
darts | darts-master/cnn/train_imagenet.py | import os
import sys
import numpy as np
import time
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# Command-line interface for training a DARTS genotype model on ImageNet.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
parser.add_argument('--decay_period', type=int, default=1, help='epochs between two learning rate decays')
parser.add_argument('--parallel', action='store_true', default=False, help='data parallelism')
args = parser.parse_args()

# Timestamped experiment directory with a snapshot of all .py files.
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log both to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

CLASSES = 1000  # ImageNet-1k class count
class CrossEntropyLabelSmooth(nn.Module):
  """Cross-entropy loss with uniform label smoothing.

  The one-hot target is mixed with a uniform distribution:
  (1 - epsilon) * one_hot + epsilon / num_classes.
  """

  def __init__(self, num_classes, epsilon):
    super(CrossEntropyLabelSmooth, self).__init__()
    self.num_classes = num_classes
    self.epsilon = epsilon
    self.logsoftmax = nn.LogSoftmax(dim=1)

  def forward(self, inputs, targets):
    """`inputs`: (N, C) logits; `targets`: (N,) class indices. Returns a scalar."""
    log_probs = self.logsoftmax(inputs)
    one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
    smoothed = one_hot * (1 - self.epsilon) + self.epsilon / self.num_classes
    return (-smoothed * log_probs).mean(0).sum()
def main():
  """Train a DARTS genotype model on ImageNet and checkpoint the best top-1."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  # Seed numpy and torch (CPU + CUDA) for reproducibility.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # Resolve the architecture genotype by name from genotypes.py.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  if args.parallel:
    model = nn.DataParallel(model).cuda()
  else:
    model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  # Plain CE for evaluation; label-smoothed CE for training.
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()

  optimizer = torch.optim.SGD(
    model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  # Standard ImageNet augmentation + color jitter for train; center crop for val.
  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      transforms.ColorJitter(
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.2),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

  # Exponential LR decay every `decay_period` epochs.
  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

  best_acc_top1 = 0
  for epoch in range(args.epochs):
    # NOTE(review): scheduler.step() before training follows old (<=1.0)
    # PyTorch semantics; newer versions expect it after the optimizer steps.
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    # Linearly ramp drop-path probability over the run.
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)

    # Checkpoint every epoch; additionally mirror the best top-1 model.
    is_best = False
    if valid_acc_top1 > best_acc_top1:
      best_acc_top1 = valid_acc_top1
      is_best = True

    utils.save_checkpoint({
      'epoch': epoch + 1,
      'state_dict': model.state_dict(),
      'best_acc_top1': best_acc_top1,
      'optimizer' : optimizer.state_dict(),
      }, is_best, args.save)
def train(train_queue, model, criterion, optimizer):
  """Run one training epoch; returns (top-1 accuracy, mean loss)."""
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.train()

  for step, (input, target) in enumerate(train_queue):
    # `async` is a reserved keyword since Python 3.7; the argument is now
    # called `non_blocking`. Variable wrappers are no-ops in modern PyTorch.
    target = target.cuda(non_blocking=True)
    input = input.cuda()

    optimizer.zero_grad()
    logits, logits_aux = model(input)
    loss = criterion(logits, target)
    if args.auxiliary:
      # Auxiliary-tower loss (deep supervision), weighted into the total.
      loss_aux = criterion(logits_aux, target)
      loss += args.auxiliary_weight*loss_aux

    loss.backward()
    # clip_grad_norm was renamed clip_grad_norm_ (in-place convention).
    nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
    optimizer.step()

    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    # 0-dim tensor indexing (`.data[0]`) raises in modern PyTorch; use .item().
    objs.update(loss.item(), n)
    top1.update(prec1.item(), n)
    top5.update(prec5.item(), n)

    if step % args.report_freq == 0:
      logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

  return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
  """Evaluate on the validation queue; returns (top-1, top-5, mean loss)."""
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()

  # `Variable(..., volatile=True)` and `.cuda(async=True)` are broken on
  # modern Python/PyTorch; torch.no_grad() + non_blocking are the replacements.
  with torch.no_grad():
    for step, (input, target) in enumerate(valid_queue):
      input = input.cuda()
      target = target.cuda(non_blocking=True)

      logits, _ = model(input)
      loss = criterion(logits, target)

      prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
      n = input.size(0)
      # .item() instead of `.data[0]`, which raises on 0-dim tensors.
      objs.update(loss.item(), n)
      top1.update(prec1.item(), n)
      top5.update(prec5.item(), n)

      if step % args.report_freq == 0:
        logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

  return top1.avg, top5.avg, objs.avg
# Script entry point: run ImageNet training when invoked directly.
if __name__ == '__main__':
  main()
| 7,992 | 33.601732 | 106 | py |
darts | darts-master/cnn/utils.py | import os
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
class AvgrageMeter(object):
  """Running average of a scalar metric (loss, accuracy, ...).

  Exposes `avg`, `sum` and `cnt`; `update(val, n)` folds in a value that
  was averaged over `n` samples.
  """

  def __init__(self):
    self.reset()

  def reset(self):
    """Discard all accumulated statistics."""
    self.avg = 0
    self.sum = 0
    self.cnt = 0

  def update(self, val, n=1):
    """Accumulate `val` (a per-sample average over `n` samples)."""
    self.sum = self.sum + val * n
    self.cnt = self.cnt + n
    self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
  """Compute top-k classification accuracy, in percent.

  Args:
    output: (batch, num_classes) score tensor.
    target: (batch,) tensor of class indices.
    topk: tuple of k values to report.

  Returns:
    List of scalar tensors, one accuracy per k in `topk`.
  """
  maxk = max(topk)
  batch_size = target.size(0)

  _, pred = output.topk(maxk, 1, True, True)
  pred = pred.t()
  correct = pred.eq(target.view(1, -1).expand_as(pred))

  res = []
  for k in topk:
    # Fix: use reshape(-1), not view(-1). In PyTorch >= 1.5 the comparison
    # result can be non-contiguous (pred was transposed), and view() then
    # raises "view size is not compatible with input tensor's size and
    # stride"; reshape copies only when needed.
    correct_k = correct[:k].reshape(-1).float().sum(0)
    res.append(correct_k.mul_(100.0/batch_size))
  return res
class Cutout(object):
  """Cutout augmentation: zero one random `length` x `length` square.

  Operates in place on a CHW image tensor; the square is centred at a
  uniformly random pixel and clipped at the image borders.
  """

  def __init__(self, length):
    self.length = length

  def __call__(self, img):
    h, w = img.size(1), img.size(2)
    mask = np.ones((h, w), np.float32)
    # Row centre is drawn before column centre (RNG consumption order).
    y = np.random.randint(h)
    x = np.random.randint(w)

    half = self.length // 2
    y1, y2 = np.clip(y - half, 0, h), np.clip(y + half, 0, h)
    x1, x2 = np.clip(x - half, 0, w), np.clip(x + half, 0, w)
    mask[y1: y2, x1: x2] = 0.

    patch = torch.from_numpy(mask).expand_as(img)
    img *= patch
    return img
def _data_transforms_cifar10(args):
  """Build the (train, valid) torchvision pipelines for CIFAR-10.

  Train: random crop + horizontal flip + tensor + per-channel normalize,
  plus optional Cutout when args.cutout is set (square side
  args.cutout_length). Valid: tensor + normalize only.
  """
  CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
  CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

  train_ops = [
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
  ]
  train_transform = transforms.Compose(train_ops)
  if args.cutout:
    # Cutout runs on the tensor, so it is appended after ToTensor/Normalize.
    train_transform.transforms.append(Cutout(args.cutout_length))

  valid_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
  ])
  return train_transform, valid_transform
def count_parameters_in_MB(model):
  """Return the parameter count of `model` in millions (MB in paper speak).

  Parameters whose name contains "auxiliary" (the auxiliary head) are
  excluded so the figure matches the deployed network.
  """
  # Builtin sum, not np.sum: passing a generator to np.sum is deprecated
  # (NumPy only falls back to the builtin and warns).
  return sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def save_checkpoint(state, is_best, save):
  """Write `state` to <save>/checkpoint.pth.tar.

  When `is_best` is true, the checkpoint is also mirrored to
  <save>/model_best.pth.tar.
  """
  checkpoint_path = os.path.join(save, 'checkpoint.pth.tar')
  torch.save(state, checkpoint_path)
  if not is_best:
    return
  best_path = os.path.join(save, 'model_best.pth.tar')
  shutil.copyfile(checkpoint_path, best_path)
def save(model, model_path):
  """Serialize only `model`'s state dict (not the module) to `model_path`."""
  state = model.state_dict()
  torch.save(state, model_path)
def load(model, model_path):
  """Restore parameters written by `save` into `model`, in place."""
  state = torch.load(model_path)
  model.load_state_dict(state)
def drop_path(x, drop_prob):
  """Drop whole samples of `x` with probability `drop_prob`, in place.

  Draws one Bernoulli variable per batch element (broadcast over C/H/W)
  and rescales survivors by 1/keep_prob so the expectation is unchanged.
  CUDA-only, matching the rest of the training pipeline.
  """
  if drop_prob <= 0.:
    return x
  keep_prob = 1.-drop_prob
  mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
  x.div_(keep_prob)
  x.mul_(mask)
  return x
def create_exp_dir(path, scripts_to_save=None):
  """Create the experiment directory `path` (if missing).

  When `scripts_to_save` is given, the listed files are copied into
  <path>/scripts so the run is reproducible from its own snapshot.
  """
  if not os.path.exists(path):
    os.mkdir(path)
  print('Experiment dir : {}'.format(path))

  if scripts_to_save is None:
    return
  os.mkdir(os.path.join(path, 'scripts'))
  for script in scripts_to_save:
    destination = os.path.join(path, 'scripts', os.path.basename(script))
    shutil.copyfile(script, destination)
| 3,080 | 24.254098 | 105 | py |
darts | darts-master/cnn/model.py | import torch
import torch.nn as nn
from operations import *
from torch.autograd import Variable
from utils import drop_path
class Cell(nn.Module):
  """A fixed (post-search) DARTS cell built from a discrete genotype.

  The two cell inputs are preprocessed to C channels, then each
  intermediate node sums the outputs of its two chosen ops; the nodes
  listed in the genotype's concat field are concatenated channel-wise.
  NOTE: ops are appended to a ModuleList in genotype order — the
  registration order fixes state_dict keys, so it must not be changed.
  """
  def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
    super(Cell, self).__init__()
    print(C_prev_prev, C_prev, C)
    # If the previous cell reduced, its output has half the spatial size,
    # so s0 needs a FactorizedReduce instead of a plain 1x1 conv.
    if reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
    # Pick the reduction or normal half of the genotype.
    if reduction:
      op_names, indices = zip(*genotype.reduce)
      concat = genotype.reduce_concat
    else:
      op_names, indices = zip(*genotype.normal)
      concat = genotype.normal_concat
    self._compile(C, op_names, indices, concat, reduction)
  def _compile(self, C, op_names, indices, concat, reduction):
    """Instantiate the chosen ops; two ops feed each intermediate node."""
    assert len(op_names) == len(indices)
    self._steps = len(op_names) // 2
    self._concat = concat
    self.multiplier = len(concat)
    self._ops = nn.ModuleList()
    for name, index in zip(op_names, indices):
      # Only edges coming from the two cell inputs (index < 2) use stride 2
      # in a reduction cell; later nodes are already at the reduced size.
      stride = 2 if reduction and index < 2 else 1
      op = OPS[name](C, stride, True)
      self._ops += [op]
    self._indices = indices
  def forward(self, s0, s1, drop_prob):
    """Run the cell; drop_prob enables drop-path on non-identity edges."""
    s0 = self.preprocess0(s0)
    s1 = self.preprocess1(s1)
    states = [s0, s1]
    for i in range(self._steps):
      h1 = states[self._indices[2*i]]
      h2 = states[self._indices[2*i+1]]
      op1 = self._ops[2*i]
      op2 = self._ops[2*i+1]
      h1 = op1(h1)
      h2 = op2(h2)
      # Drop-path regularization is applied only during training and never
      # to identity (skip) edges, which would otherwise be zeroed outright.
      if self.training and drop_prob > 0.:
        if not isinstance(op1, Identity):
          h1 = drop_path(h1, drop_prob)
        if not isinstance(op2, Identity):
          h2 = drop_path(h2, drop_prob)
      s = h1 + h2
      states += [s]
    return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHeadCIFAR(nn.Module):
  """Auxiliary classifier attached at 2/3 depth of the CIFAR network.

  Provides an extra gradient signal during training; its parameters are
  excluded from the reported parameter count (name contains "auxiliary").
  """
  def __init__(self, C, num_classes):
    """assuming input size 8x8"""
    super(AuxiliaryHeadCIFAR, self).__init__()
    self.features = nn.Sequential(
      nn.ReLU(inplace=True),
      nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2
      nn.Conv2d(C, 128, 1, bias=False),
      nn.BatchNorm2d(128),
      nn.ReLU(inplace=True),
      nn.Conv2d(128, 768, 2, bias=False),
      nn.BatchNorm2d(768),
      nn.ReLU(inplace=True)
    )
    self.classifier = nn.Linear(768, num_classes)
  def forward(self, x):
    x = self.features(x)
    # Flatten the (assumed 1x1) spatial map before the linear classifier.
    x = self.classifier(x.view(x.size(0),-1))
    return x
class AuxiliaryHeadImageNet(nn.Module):
  """Auxiliary classifier for the ImageNet network (input assumed 14x14).

  Mirrors AuxiliaryHeadCIFAR but with pooling sized for 14x14 maps; one
  BatchNorm is deliberately left out (see the in-code note below).
  """
  def __init__(self, C, num_classes):
    """assuming input size 14x14"""
    super(AuxiliaryHeadImageNet, self).__init__()
    self.features = nn.Sequential(
      nn.ReLU(inplace=True),
      nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
      nn.Conv2d(C, 128, 1, bias=False),
      nn.BatchNorm2d(128),
      nn.ReLU(inplace=True),
      nn.Conv2d(128, 768, 2, bias=False),
      # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.
      # Commenting it out for consistency with the experiments in the paper.
      # nn.BatchNorm2d(768),
      nn.ReLU(inplace=True)
    )
    self.classifier = nn.Linear(768, num_classes)
  def forward(self, x):
    x = self.features(x)
    x = self.classifier(x.view(x.size(0),-1))
    return x
class NetworkCIFAR(nn.Module):
  """Full evaluation network for CIFAR, stacking `layers` genotype cells.

  Reduction cells (doubling channels, halving resolution) sit at 1/3 and
  2/3 depth; an optional auxiliary head taps the 2/3-depth features.
  NOTE: callers must set `self.drop_path_prob` before calling forward —
  it is assigned externally by the training script, not in __init__.
  """
  def __init__(self, C, num_classes, layers, auxiliary, genotype):
    super(NetworkCIFAR, self).__init__()
    self._layers = layers
    self._auxiliary = auxiliary
    stem_multiplier = 3
    C_curr = stem_multiplier*C
    # Stem lifts the 3-channel image to stem_multiplier*C channels.
    self.stem = nn.Sequential(
      nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # Reduction cells at 1/3 and 2/3 depth double the channel count.
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
      if i == 2*layers//3:
        C_to_auxiliary = C_prev
    if auxiliary:
      self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
  def forward(self, input):
    """Returns (logits, logits_aux); logits_aux is None when unused."""
    logits_aux = None
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
      # The auxiliary head is only evaluated during training.
      if i == 2*self._layers//3:
        if self._auxiliary and self.training:
          logits_aux = self.auxiliary_head(s1)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits, logits_aux
class NetworkImageNet(nn.Module):
  """Full evaluation network for ImageNet.

  Uses a two-stage strided stem (overall 8x downsampling before the first
  cell) instead of the CIFAR stem; otherwise mirrors NetworkCIFAR.
  NOTE: callers must set `self.drop_path_prob` before calling forward.
  """
  def __init__(self, C, num_classes, layers, auxiliary, genotype):
    super(NetworkImageNet, self).__init__()
    self._layers = layers
    self._auxiliary = auxiliary
    self.stem0 = nn.Sequential(
      nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
      nn.BatchNorm2d(C // 2),
      nn.ReLU(inplace=True),
      nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
      nn.BatchNorm2d(C),
    )
    self.stem1 = nn.Sequential(
      nn.ReLU(inplace=True),
      nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
      nn.BatchNorm2d(C),
    )
    C_prev_prev, C_prev, C_curr = C, C, C
    self.cells = nn.ModuleList()
    # stem1 is strided, so the first cell sees inputs at different scales.
    reduction_prev = True
    for i in range(layers):
      if i in [layers // 3, 2 * layers // 3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
      if i == 2 * layers // 3:
        C_to_auxiliary = C_prev
    if auxiliary:
      self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
    # Fixed 7x7 pooling (assumes 224x224 input -> 7x7 final feature map).
    self.global_pooling = nn.AvgPool2d(7)
    self.classifier = nn.Linear(C_prev, num_classes)
  def forward(self, input):
    """Returns (logits, logits_aux); logits_aux is None when unused."""
    logits_aux = None
    s0 = self.stem0(input)
    s1 = self.stem1(s0)
    for i, cell in enumerate(self.cells):
      s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
      if i == 2 * self._layers // 3:
        if self._auxiliary and self.training:
          logits_aux = self.auxiliary_head(s1)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0), -1))
    return logits, logits_aux
| 6,640 | 29.888372 | 89 | py |
darts | darts-master/cnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
  """Continuous relaxation of one edge: weighted sum over all candidate ops.

  One instance of every primitive in PRIMITIVES is kept; forward blends
  their outputs with the softmaxed architecture weights for this edge.
  """
  def __init__(self, C, stride):
    super(MixedOp, self).__init__()
    self._ops = nn.ModuleList()
    for primitive in PRIMITIVES:
      op = OPS[primitive](C, stride, False)
      # Pooling ops get a (non-affine) BN so their output scale is
      # comparable with the conv ops during the weighted sum.
      if 'pool' in primitive:
        op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
      self._ops.append(op)
  def forward(self, x, weights):
    # weights: one scalar per primitive (softmax of the edge's alphas).
    return sum(w * op(x) for w, op in zip(weights, self._ops))
class Cell(nn.Module):
  """Search-phase cell: every edge is a MixedOp over all primitives.

  Node i receives one MixedOp edge from each of its 2+i predecessors;
  the last `multiplier` node outputs are concatenated channel-wise.
  All BN layers are non-affine during search (affine=False).
  """
  def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
    super(Cell, self).__init__()
    self.reduction = reduction
    # Match spatial sizes: FactorizedReduce if the previous cell reduced.
    if reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
    self._steps = steps
    self._multiplier = multiplier
    self._ops = nn.ModuleList()
    self._bns = nn.ModuleList()
    # Edge order: for node i, one edge per predecessor j in [0, 2+i).
    # This flat ordering must match the alphas rows built in Network.
    for i in range(self._steps):
      for j in range(2+i):
        stride = 2 if reduction and j < 2 else 1
        op = MixedOp(C, stride)
        self._ops.append(op)
  def forward(self, s0, s1, weights):
    s0 = self.preprocess0(s0)
    s1 = self.preprocess1(s1)
    states = [s0, s1]
    offset = 0
    for i in range(self._steps):
      # Node i sums its mixed-op edges from all previous states.
      s = sum(self._ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))
      offset += len(states)
      states.append(s)
    return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
  """Search-phase super-network: stacked mixed-op cells plus the
  architecture parameters (alphas) that DARTS optimizes.

  genotype() discretizes the current alphas into the best two input
  edges per node (excluding 'none') for the normal and reduction cells.
  """
  def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):
    super(Network, self).__init__()
    self._C = C
    self._num_classes = num_classes
    self._layers = layers
    self._criterion = criterion
    self._steps = steps
    self._multiplier = multiplier
    C_curr = stem_multiplier*C
    self.stem = nn.Sequential(
      nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # Reduction cells at 1/3 and 2/3 depth, as in the eval network.
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      C_prev_prev, C_prev = C_prev, multiplier*C_curr
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
    self._initialize_alphas()
  def new(self):
    """Clone the network with fresh weights but the same alphas
    (used by the architect for the unrolled/virtual step)."""
    model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
        x.data.copy_(y.data)
    return model_new
  def forward(self, input):
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      # All normal cells share alphas_normal; all reduction cells share
      # alphas_reduce. Softmax turns each row into mixing weights.
      if cell.reduction:
        weights = F.softmax(self.alphas_reduce, dim=-1)
      else:
        weights = F.softmax(self.alphas_normal, dim=-1)
      s0, s1 = s1, cell(s0, s1, weights)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits
  def _loss(self, input, target):
    logits = self(input)
    return self._criterion(logits, target)
  def _initialize_alphas(self):
    # One alphas row per edge: sum over nodes i of (2+i) predecessor edges.
    k = sum(1 for i in range(self._steps) for n in range(2+i))
    num_ops = len(PRIMITIVES)
    # Small random init so the softmax starts near-uniform.
    self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self._arch_parameters = [
      self.alphas_normal,
      self.alphas_reduce,
    ]
  def arch_parameters(self):
    return self._arch_parameters
  def genotype(self):
    """Discretize the current alphas into a Genotype (best 2 edges/node)."""
    def _parse(weights):
      gene = []
      n = 2
      start = 0
      for i in range(self._steps):
        end = start + n
        W = weights[start:end].copy()
        # Rank candidate input edges by their best non-'none' op weight
        # and keep the top two per node.
        edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
        for j in edges:
          k_best = None
          for k in range(len(W[j])):
            if k != PRIMITIVES.index('none'):
              if k_best is None or W[j][k] > W[j][k_best]:
                k_best = k
          gene.append((PRIMITIVES[k_best], j))
        start = end
        n += 1
      return gene
    gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
    gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
    # Concatenate the last `multiplier` intermediate nodes.
    concat = range(2+self._steps-self._multiplier, self._steps+2)
    genotype = Genotype(
      normal=gene_normal, normal_concat=concat,
      reduce=gene_reduce, reduce_concat=concat
    )
    return genotype
| 5,009 | 29.54878 | 128 | py |
darts | darts-master/cnn/train_search.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model_search import Network
from architect import Architect
# Command-line interface and run setup for architecture search on CIFAR-10.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
args = parser.parse_args()
# Timestamped experiment directory with a snapshot of all *.py scripts.
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log to both stdout and <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
  format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
CIFAR_CLASSES = 10
def main():
  """Run DARTS architecture search on CIFAR-10.

  Splits the training set in two (train_portion): one half trains the
  network weights, the other drives the architecture (alphas) updates
  via the Architect. Logs the current genotype every epoch.
  """
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  # Seed numpy and torch (CPU + CUDA) for reproducibility.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion)
  model = model.cuda()
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay)
  train_transform, valid_transform = utils._data_transforms_cifar10(args)
  train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
  num_train = len(train_data)
  indices = list(range(num_train))
  # First `train_portion` of the indices trains weights; the rest trains alphas.
  split = int(np.floor(args.train_portion * num_train))
  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size,
      sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
      pin_memory=True, num_workers=2)
  valid_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size,
      sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
      pin_memory=True, num_workers=2)
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)
  architect = Architect(model, args)
  for epoch in range(args.epochs):
    scheduler.step()
    lr = scheduler.get_lr()[0]
    logging.info('epoch %d lr %e', epoch, lr)
    # Log the genotype implied by the current alphas.
    genotype = model.genotype()
    logging.info('genotype = %s', genotype)
    print(F.softmax(model.alphas_normal, dim=-1))
    print(F.softmax(model.alphas_reduce, dim=-1))
    # training
    train_acc, train_obj = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr)
    logging.info('train_acc %f', train_acc)
    # validation
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)
    utils.save(model, os.path.join(args.save, 'weights.pt'))
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr):
  """One search epoch: alternate an architect (alphas) step on a validation
  minibatch with a weight step on a training minibatch.

  Returns (top-1 accuracy, mean loss) over the training queue.
  """
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  for step, (input, target) in enumerate(train_queue):
    model.train()
    n = input.size(0)
    input = Variable(input, requires_grad=False).cuda()
    # Fix: `async` is a reserved keyword since Python 3.7 (SyntaxError);
    # the torch API name is non_blocking.
    target = Variable(target, requires_grad=False).cuda(non_blocking=True)
    # get a random minibatch from the search queue with replacement
    input_search, target_search = next(iter(valid_queue))
    input_search = Variable(input_search, requires_grad=False).cuda()
    target_search = Variable(target_search, requires_grad=False).cuda(non_blocking=True)
    # Architecture step first (updates alphas), then the weight step.
    architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
    optimizer.zero_grad()
    logits = model(input)
    loss = criterion(logits, target)
    loss.backward()
    nn.utils.clip_grad_norm(model.parameters(), args.grad_clip)
    optimizer.step()
    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    objs.update(loss.data[0], n)
    top1.update(prec1.data[0], n)
    top5.update(prec5.data[0], n)
    if step % args.report_freq == 0:
      logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
  """Evaluate the search network on `valid_queue`; returns (top-1, mean loss)."""
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()
  for step, (input, target) in enumerate(valid_queue):
    input = Variable(input, volatile=True).cuda()
    # Fix: `async` is a reserved keyword since Python 3.7 (SyntaxError);
    # the torch API name is non_blocking.
    target = Variable(target, volatile=True).cuda(non_blocking=True)
    logits = model(input)
    loss = criterion(logits, target)
    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    objs.update(loss.data[0], n)
    top1.update(prec1.data[0], n)
    top5.update(prec5.data[0], n)
    if step % args.report_freq == 0:
      logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, objs.avg
# Script entry point (argument parsing happens at import time above).
if __name__ == '__main__':
  main()
| 7,212 | 35.80102 | 115 | py |
darts | darts-master/cnn/test_imagenet.py | import os
import sys
import numpy as np
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# CLI and logging setup for evaluating a pretrained model on ImageNet val.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
  format=log_format, datefmt='%m/%d %I:%M:%S %p')
CLASSES = 1000
def main():
  """Evaluate a pretrained checkpoint (--model_path) on the ImageNet
  validation split and log top-1/top-5 accuracy."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  # Look up the requested genotype by name from genotypes.py.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  model.load_state_dict(torch.load(args.model_path)['state_dict'])
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  validdir = os.path.join(args.data, 'val')
  # Standard ImageNet eval preprocessing: resize 256, center-crop 224.
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
  # forward() reads drop_path_prob; 0 disables drop-path at eval time.
  model.drop_path_prob = args.drop_path_prob
  valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
  logging.info('valid_acc_top1 %f', valid_acc_top1)
  logging.info('valid_acc_top5 %f', valid_acc_top5)
def infer(valid_queue, model, criterion):
  """Evaluate `model` on `valid_queue`; returns (top-1, top-5, mean loss)."""
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()
  for step, (input, target) in enumerate(valid_queue):
    input = Variable(input, volatile=True).cuda()
    # Fix: `async` is a reserved keyword since Python 3.7 (SyntaxError);
    # the torch API name is non_blocking.
    target = Variable(target, volatile=True).cuda(non_blocking=True)
    logits, _ = model(input)
    loss = criterion(logits, target)
    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    objs.update(loss.data[0], n)
    top1.update(prec1.data[0], n)
    top5.update(prec5.data[0], n)
    if step % args.report_freq == 0:
      logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, top5.avg, objs.avg
# Script entry point (argument parsing happens at import time above).
if __name__ == '__main__':
  main()
| 3,785 | 32.504425 | 104 | py |
darts | darts-master/cnn/train.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# CLI and run setup for training a discovered architecture from scratch on CIFAR-10.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
args = parser.parse_args()
# Timestamped experiment directory with a snapshot of all *.py scripts.
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log to both stdout and <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
  format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
CIFAR_CLASSES = 10
def main():
  """Train the genotype selected by --arch from scratch on CIFAR-10.

  Drop-path probability is linearly ramped from 0 to --drop_path_prob
  over the run; weights are checkpointed each epoch.
  """
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  # Look up the requested genotype by name from genotypes.py.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )
  train_transform, valid_transform = utils._data_transforms_cifar10(args)
  train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
  valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
  valid_queue = torch.utils.data.DataLoader(
      valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
  for epoch in range(args.epochs):
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    # Linear drop-path schedule: 0 at epoch 0 up to drop_path_prob.
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_acc %f', train_acc)
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)
    utils.save(model, os.path.join(args.save, 'weights.pt'))
def train(train_queue, model, criterion, optimizer):
  """Run one training epoch; returns (top-1 accuracy, mean loss).

  Adds the auxiliary-head loss (weighted by args.auxiliary_weight) when
  args.auxiliary is set.
  """
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.train()
  for step, (input, target) in enumerate(train_queue):
    input = Variable(input).cuda()
    # Fix: `async` is a reserved keyword since Python 3.7 (SyntaxError);
    # the torch API name is non_blocking.
    target = Variable(target).cuda(non_blocking=True)
    optimizer.zero_grad()
    logits, logits_aux = model(input)
    loss = criterion(logits, target)
    if args.auxiliary:
      loss_aux = criterion(logits_aux, target)
      loss += args.auxiliary_weight*loss_aux
    loss.backward()
    nn.utils.clip_grad_norm(model.parameters(), args.grad_clip)
    optimizer.step()
    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    objs.update(loss.data[0], n)
    top1.update(prec1.data[0], n)
    top5.update(prec5.data[0], n)
    if step % args.report_freq == 0:
      logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
  """Evaluate `model` on `valid_queue`; returns (top-1 accuracy, mean loss)."""
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()
  for step, (input, target) in enumerate(valid_queue):
    input = Variable(input, volatile=True).cuda()
    # Fix: `async` is a reserved keyword since Python 3.7 (SyntaxError);
    # the torch API name is non_blocking.
    target = Variable(target, volatile=True).cuda(non_blocking=True)
    logits, _ = model(input)
    loss = criterion(logits, target)
    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    objs.update(loss.data[0], n)
    top1.update(prec1.data[0], n)
    top5.update(prec5.data[0], n)
    if step % args.report_freq == 0:
      logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, objs.avg
# Script entry point (argument parsing happens at import time above).
if __name__ == '__main__':
  main()
| 6,251 | 35.561404 | 100 | py |
darts | darts-master/cnn/visualize.py | import sys
import genotypes
from graphviz import Digraph
def plot(genotype, filename):
  """Render one DARTS cell to <filename>.pdf with graphviz.

  `genotype` is a flat list of (op, input_index) pairs, two per
  intermediate node. Indices 0/1 denote the cell inputs c_{k-2}/c_{k-1};
  index j >= 2 denotes intermediate node j - 2.
  """
  g = Digraph(
      format='pdf',
      edge_attr=dict(fontsize='20', fontname="times"),
      node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"),
      engine='dot')
  g.body.extend(['rankdir=LR'])

  g.node("c_{k-2}", fillcolor='darkseagreen2')
  g.node("c_{k-1}", fillcolor='darkseagreen2')
  assert len(genotype) % 2 == 0
  steps = len(genotype) // 2

  for i in range(steps):
    g.node(str(i), fillcolor='lightblue')

  def _source_name(j):
    # Map a genotype input index to its graph node label.
    if j == 0:
      return "c_{k-2}"
    if j == 1:
      return "c_{k-1}"
    return str(j - 2)

  for i in range(steps):
    for k in (2 * i, 2 * i + 1):
      op, j = genotype[k]
      g.edge(_source_name(j), str(i), label=op, fillcolor="gray")

  # All intermediate nodes feed the cell output c_{k}.
  g.node("c_{k}", fillcolor='palegoldenrod')
  for i in range(steps):
    g.edge(str(i), "c_{k}", fillcolor="gray")

  g.render(filename, view=True)
# CLI: `python visualize.py ARCH_NAME` looks up ARCH_NAME in genotypes.py
# and renders its normal and reduction cells.
if __name__ == '__main__':
  if len(sys.argv) != 2:
    print("usage:\n python {} ARCH_NAME".format(sys.argv[0]))
    sys.exit(1)
  genotype_name = sys.argv[1]
  try:
    genotype = eval('genotypes.{}'.format(genotype_name))
  except AttributeError:
    print("{} is not specified in genotypes.py".format(genotype_name))
    sys.exit(1)
  plot(genotype.normal, "normal")
  plot(genotype.reduce, "reduction")
| 1,419 | 24.357143 | 141 | py |
darts | darts-master/cnn/genotypes.py | from collections import namedtuple
# A Genotype describes one discrete architecture: for the normal and the
# reduction cell, a list of (op_name, input_index) pairs (two per node)
# plus the node indices whose outputs are concatenated.
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
# Candidate operations for the search phase (rows of the alphas matrices).
# NOTE: the hand-crafted baselines below also use 'sep_conv_7x7' and
# 'conv_7x1_1x7', which exist in operations.OPS but are not searched over.
PRIMITIVES = [
    'none',
    'max_pool_3x3',
    'avg_pool_3x3',
    'skip_connect',
    'sep_conv_3x3',
    'sep_conv_5x5',
    'dil_conv_3x3',
    'dil_conv_5x5'
]
# Baseline architecture from Zoph et al. (NASNet-A), for comparison.
NASNet = Genotype(
  normal = [
    ('sep_conv_5x5', 1),
    ('sep_conv_3x3', 0),
    ('sep_conv_5x5', 0),
    ('sep_conv_3x3', 0),
    ('avg_pool_3x3', 1),
    ('skip_connect', 0),
    ('avg_pool_3x3', 0),
    ('avg_pool_3x3', 0),
    ('sep_conv_3x3', 1),
    ('skip_connect', 1),
  ],
  normal_concat = [2, 3, 4, 5, 6],
  reduce = [
    ('sep_conv_5x5', 1),
    ('sep_conv_7x7', 0),
    ('max_pool_3x3', 1),
    ('sep_conv_7x7', 0),
    ('avg_pool_3x3', 1),
    ('sep_conv_5x5', 0),
    ('skip_connect', 3),
    ('avg_pool_3x3', 2),
    ('sep_conv_3x3', 2),
    ('max_pool_3x3', 1),
  ],
  reduce_concat = [4, 5, 6],
)
# Baseline architecture from Real et al. (AmoebaNet), for comparison.
AmoebaNet = Genotype(
  normal = [
    ('avg_pool_3x3', 0),
    ('max_pool_3x3', 1),
    ('sep_conv_3x3', 0),
    ('sep_conv_5x5', 2),
    ('sep_conv_3x3', 0),
    ('avg_pool_3x3', 3),
    ('sep_conv_3x3', 1),
    ('skip_connect', 1),
    ('skip_connect', 0),
    ('avg_pool_3x3', 1),
  ],
  normal_concat = [4, 5, 6],
  reduce = [
    ('avg_pool_3x3', 0),
    ('sep_conv_3x3', 1),
    ('max_pool_3x3', 0),
    ('sep_conv_7x7', 2),
    ('sep_conv_7x7', 0),
    ('avg_pool_3x3', 1),
    ('max_pool_3x3', 0),
    ('max_pool_3x3', 1),
    ('conv_7x1_1x7', 0),
    ('sep_conv_3x3', 5),
  ],
  reduce_concat = [3, 4, 6]
)
# Architectures found by DARTS first-order (V1) and second-order (V2) search.
DARTS_V1 = Genotype(normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])
DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
# Default architecture used by the training/eval scripts (--arch DARTS).
DARTS = DARTS_V2
| 2,410 | 29.518987 | 429 | py |
darts | darts-master/cnn/operations.py | import torch
import torch.nn as nn
# Registry of candidate operations, keyed by primitive name.
# Each value is a factory (C, stride, affine) -> nn.Module that maps C input
# channels to C output channels at the given stride; `affine` toggles
# learnable BatchNorm parameters.
OPS = {
  'none' : lambda C, stride, affine: Zero(stride),
  'avg_pool_3x3' : lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
  'max_pool_3x3' : lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
  'skip_connect' : lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
  'sep_conv_3x3' : lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
  'sep_conv_5x5' : lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
  'sep_conv_7x7' : lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
  'dil_conv_3x3' : lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
  'dil_conv_5x5' : lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
  # Factorized 7x7 conv: a 1x7 followed by a 7x1 (stride applied once per axis).
  'conv_7x1_1x7' : lambda C, stride, affine: nn.Sequential(
    nn.ReLU(inplace=False),
    nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
    nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
    nn.BatchNorm2d(C, affine=affine)
    ),
}
class ReLUConvBN(nn.Module):
  """ReLU -> Conv2d -> BatchNorm2d applied in sequence."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(ReLUConvBN, self).__init__()
    # Build the stage list first, then wrap it; order matters for state_dict keys.
    stages = [
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
      nn.BatchNorm2d(C_out, affine=affine),
    ]
    self.op = nn.Sequential(*stages)

  def forward(self, x):
    return self.op(x)
class DilConv(nn.Module):
  """Dilated depthwise-separable conv: ReLU -> dilated depthwise -> pointwise -> BN."""

  def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
    super(DilConv, self).__init__()
    pieces = [
      nn.ReLU(inplace=False),
      # Depthwise (groups=C_in) convolution carries the dilation and stride.
      nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False),
      # Pointwise 1x1 convolution mixes channels and maps to C_out.
      nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
      nn.BatchNorm2d(C_out, affine=affine),
    ]
    self.op = nn.Sequential(*pieces)

  def forward(self, x):
    return self.op(x)
class SepConv(nn.Module):
  """Two stacked depthwise-separable convolutions (ReLU, depthwise, pointwise, BN — twice).

  Only the first stack applies the stride; channels stay at C_in until the
  second stack's pointwise conv maps them to C_out.
  """

  def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
    super(SepConv, self).__init__()
    stages = []
    # Stack 1: strided depthwise conv, pointwise keeps C_in channels.
    stages += [
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),
      nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
      nn.BatchNorm2d(C_in, affine=affine),
    ]
    # Stack 2: stride 1, pointwise conv maps to C_out.
    stages += [
      nn.ReLU(inplace=False),
      nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, bias=False),
      nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
      nn.BatchNorm2d(C_out, affine=affine),
    ]
    self.op = nn.Sequential(*stages)

  def forward(self, x):
    return self.op(x)
class Identity(nn.Module):
  """Pass-through op used for stride-1 skip connections."""

  def __init__(self):
    super(Identity, self).__init__()

  def forward(self, x):
    # Return the input unchanged.
    return x
class Zero(nn.Module):
  """The 'none' op: outputs zeros shaped like a strided version of the input."""

  def __init__(self, stride):
    super(Zero, self).__init__()
    self.stride = stride

  def forward(self, x):
    if self.stride != 1:
      # Spatially subsample first so the output matches a strided op's shape.
      x = x[:, :, ::self.stride, ::self.stride]
    return x.mul(0.)
class FactorizedReduce(nn.Module):
  """Halves spatial resolution via two offset stride-2 1x1 convs, concatenated."""

  def __init__(self, C_in, C_out, affine=True):
    super(FactorizedReduce, self).__init__()
    assert C_out % 2 == 0
    half = C_out // 2
    self.relu = nn.ReLU(inplace=False)
    self.conv_1 = nn.Conv2d(C_in, half, 1, stride=2, padding=0, bias=False)
    self.conv_2 = nn.Conv2d(C_in, half, 1, stride=2, padding=0, bias=False)
    self.bn = nn.BatchNorm2d(C_out, affine=affine)

  def forward(self, x):
    x = self.relu(x)
    # conv_2 sees the input shifted by one pixel so the two branches sample
    # complementary spatial positions before concatenation.
    shifted = x[:, :, 1:, 1:]
    out = torch.cat([self.conv_1(x), self.conv_2(shifted)], dim=1)
    return self.bn(out)
| 3,717 | 34.075472 | 129 | py |
darts | darts-master/rnn/test.py | import argparse
import os, sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import data
import model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
# Command-line interface for evaluating a trained language model on the test
# set. Note: --cuda uses action='store_false', so CUDA is ON by default and
# passing --cuda disables it.
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
                    help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=850,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=850,
                    help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=850,
                    help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
                    help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
                    help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.2,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1267,
                    help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='random seed')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--model_path', type=str, default='EXP/model.pt',
                    help='path to load the pretrained model')
parser.add_argument('--alpha', type=float, default=0,
                    help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
                    help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
                    help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
                    help='continue train from a checkpoint')
parser.add_argument('--n_experts', type=int, default=1,
                    help='number of experts')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
                    help='max sequence length')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
args = parser.parse_args()
def logging(s, print_=True, log_=True):
  """Minimal logger used by this evaluation script.

  Args:
    s: message to emit.
    print_: when True (the default), echo the message to stdout. Fix: the
      original ignored this flag and always printed.
    log_: accepted for signature compatibility with the training script's
      logger; this evaluation-only variant never writes to a file.
  """
  if print_:
    print(s)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
  if not args.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
  else:
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled=True
    torch.cuda.manual_seed_all(args.seed)

# Load the corpus and lay the test stream out as (nbatch, batch_size) columns.
# batch size 1 gives the exact (non-truncated) test perplexity.
corpus = data.Corpus(args.data)
test_batch_size = 1
test_data = batchify(corpus.test, test_batch_size, args)
def evaluate(data_source, batch_size=10):
  """Return the average per-token NLL of the (module-global) model on data_source.

  Relies on globals created at script level: model, parallel_model, corpus, args.
  """
  # Turn on evaluation mode which disables dropout.
  model.eval()
  total_loss = 0
  ntokens = len(corpus.dictionary)
  hidden = model.init_hidden(batch_size)
  for i in range(0, data_source.size(0) - 1, args.bptt):
    print(i, data_source.size(0)-1)
    data, targets = get_batch(data_source, i, args, evaluation=True)
    targets = targets.view(-1)
    log_prob, hidden = parallel_model(data, hidden)
    # Mean NLL of this chunk, weighted below by chunk length.
    loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
    total_loss += loss * len(data)
    # Detach the hidden state so BPTT does not span evaluation chunks.
    hidden = repackage_hidden(hidden)
  # NOTE(review): total_loss[0] indexes a 1-element tensor (pre-0.4 PyTorch
  # idiom); on modern PyTorch this would be total_loss.item() — confirm the
  # targeted torch version.
  return total_loss[0] / len(data_source)
# Load the best saved model.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoints.
model = torch.load(args.model_path)
total_params = sum(x.data.nelement() for x in model.parameters())
logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params))

parallel_model = model.cuda()

# Run on test data and report loss/perplexity.
test_loss = evaluate(test_data, test_batch_size)
logging('=' * 89)
logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging('=' * 89)
| 5,048 | 40.385246 | 118 | py |
darts | darts-master/rnn/architect.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def _clip(grads, max_norm):
total_norm = 0
for g in grads:
param_norm = g.data.norm(2)
total_norm += param_norm ** 2
total_norm = total_norm ** 0.5
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grads:
g.data.mul_(clip_coef)
return clip_coef
class Architect(object):
  """Updates the architecture parameters (alphas) of a DARTS search model.

  Implements the bilevel optimization step: network weights are optimized
  elsewhere on training data; this class takes Adam steps on the architecture
  encoding using validation data, optionally through a one-step "unrolled"
  copy of the model (second-order DARTS).
  """

  def __init__(self, model, args):
    self.network_weight_decay = args.wdecay
    self.network_clip = args.clip
    self.model = model
    # Adam over the architecture parameters only, not the network weights.
    self.optimizer = torch.optim.Adam(self.model.arch_parameters(), lr=args.arch_lr, weight_decay=args.arch_wdecay)

  def _compute_unrolled_model(self, hidden, input, target, eta):
    # Build w' = w - eta * (clip(grad_w L_train) + wd * w): one virtual SGD step.
    loss, hidden_next = self.model._loss(hidden, input, target)
    theta = _concat(self.model.parameters()).data
    grads = torch.autograd.grad(loss, self.model.parameters())
    clip_coef = _clip(grads, self.network_clip)
    dtheta = _concat(grads).data + self.network_weight_decay*theta
    unrolled_model = self._construct_model_from_theta(theta.sub(eta, dtheta))
    return unrolled_model, clip_coef

  def step(self,
          hidden_train, input_train, target_train,
          hidden_valid, input_valid, target_valid,
          network_optimizer, unrolled):
    """One architecture update; eta mirrors the current network learning rate."""
    eta = network_optimizer.param_groups[0]['lr']
    self.optimizer.zero_grad()
    if unrolled:
      hidden = self._backward_step_unrolled(hidden_train, input_train, target_train, hidden_valid, input_valid, target_valid, eta)
    else:
      hidden = self._backward_step(hidden_valid, input_valid, target_valid)
    self.optimizer.step()
    return hidden, None

  def _backward_step(self, hidden, input, target):
    # First-order approximation: plain validation gradient w.r.t. the alphas.
    loss, hidden_next = self.model._loss(hidden, input, target)
    loss.backward()
    return hidden_next

  def _backward_step_unrolled(self,
          hidden_train, input_train, target_train,
          hidden_valid, input_valid, target_valid, eta):
    # Validation gradient taken through the one-step-unrolled weights.
    unrolled_model, clip_coef = self._compute_unrolled_model(hidden_train, input_train, target_train, eta)
    unrolled_loss, hidden_next = unrolled_model._loss(hidden_valid, input_valid, target_valid)

    unrolled_loss.backward()
    dalpha = [v.grad for v in unrolled_model.arch_parameters()]
    dtheta = [v.grad for v in unrolled_model.parameters()]
    _clip(dtheta, self.network_clip)
    vector = [dt.data for dt in dtheta]
    # Second-order correction via a finite-difference Hessian-vector product.
    implicit_grads = self._hessian_vector_product(vector, hidden_train, input_train, target_train, r=1e-2)

    for g, ig in zip(dalpha, implicit_grads):
      g.data.sub_(eta * clip_coef, ig.data)

    # Copy the computed gradients onto the live model's alphas for the Adam step.
    for v, g in zip(self.model.arch_parameters(), dalpha):
      if v.grad is None:
        v.grad = Variable(g.data)
      else:
        v.grad.data.copy_(g.data)
    return hidden_next

  def _construct_model_from_theta(self, theta):
    # Clone the search model and load the flattened parameter vector theta
    # back into its (named) parameter slots.
    model_new = self.model.new()
    model_dict = self.model.state_dict()

    params, offset = {}, 0
    for k, v in self.model.named_parameters():
      v_length = np.prod(v.size())
      params[k] = theta[offset: offset+v_length].view(v.size())
      offset += v_length

    assert offset == len(theta)
    model_dict.update(params)
    model_new.load_state_dict(model_dict)
    return model_new.cuda()

  def _hessian_vector_product(self, vector, hidden, input, target, r=1e-2):
    # Central difference: (grad_alpha L(w+Rv) - grad_alpha L(w-Rv)) / (2R).
    R = r / _concat(vector).norm()
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(R, v)
    loss, _ = self.model._loss(hidden, input, target)
    grads_p = torch.autograd.grad(loss, self.model.arch_parameters())

    for p, v in zip(self.model.parameters(), vector):
      p.data.sub_(2*R, v)
    loss, _ = self.model._loss(hidden, input, target)
    grads_n = torch.autograd.grad(loss, self.model.arch_parameters())

    # Restore the original weights.
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(R, v)

    return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
| 4,003 | 34.122807 | 132 | py |
darts | darts-master/rnn/utils.py | import torch
import torch.nn as nn
import os, shutil
import numpy as np
from torch.autograd import Variable
def repackage_hidden(h):
  """Detach hidden state(s) from their computation-graph history.

  Accepts a single tensor or an arbitrarily nested tuple of tensors and
  returns the same structure with gradients cut, so BPTT stops here.
  """
  if isinstance(h, torch.Tensor):
    # Fix: the original tested `type(h) == Variable`, which is always False
    # on PyTorch >= 0.4 (Variable merged into Tensor), so tensors fell into
    # the tuple branch and the recursion crashed. detach() has the same
    # semantics as the old Variable(h.data) re-wrap.
    return h.detach()
  return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, args):
  """Trim *data* to a multiple of *bsz* and reshape it to (nbatch, bsz).

  Each of the bsz columns is an independent contiguous stream of tokens.
  """
  nbatch = data.size(0) // bsz
  usable = nbatch * bsz
  trimmed = data.narrow(0, 0, usable)
  batched = trimmed.view(bsz, -1).t().contiguous()
  print(batched.size())
  if args.cuda:
    batched = batched.cuda()
  return batched
def get_batch(source, i, args, seq_len=None, evaluation=False):
  """Slice a (seq, batch) source at offset *i* into input and next-step target."""
  # A falsy seq_len falls back to args.bptt; never read past the last target.
  span = min(seq_len or args.bptt, len(source) - 1 - i)
  data = Variable(source[i:i + span], volatile=evaluation)
  target = Variable(source[i + 1:i + 1 + span])
  return data, target
def create_exp_dir(path, scripts_to_save=None):
  """Create the experiment directory and optionally snapshot source scripts."""
  if not os.path.exists(path):
    os.mkdir(path)
  print('Experiment dir : {}'.format(path))

  if scripts_to_save is not None:
    # Copy each script into <path>/scripts for reproducibility.
    script_dir = os.path.join(path, 'scripts')
    os.mkdir(script_dir)
    for script in scripts_to_save:
      shutil.copyfile(script, os.path.join(script_dir, os.path.basename(script)))
def save_checkpoint(model, optimizer, epoch, path, finetune=False):
  """Persist the model, optimizer state, and epoch counter under *path*.

  Finetuning checkpoints get a 'finetune_' file-name prefix; the epoch
  marker (stored as the NEXT epoch to run) is shared.
  """
  prefix = 'finetune_' if finetune else ''
  torch.save(model, os.path.join(path, prefix + 'model.pt'))
  torch.save(optimizer.state_dict(), os.path.join(path, prefix + 'optimizer.pt'))
  torch.save({'epoch': epoch+1}, os.path.join(path, 'misc.pt'))
def embedded_dropout(embed, words, dropout=0.1, scale=None):
  """Embedding lookup with whole-word (row) dropout on the embedding matrix.

  With probability *dropout* an entire word vector is zeroed (and survivors
  rescaled by 1/(1-dropout)), so a dropped word is masked at every position
  in the batch.

  Args:
    embed: an nn.Embedding supplying the weight matrix and lookup options.
    words: LongTensor of token indices.
    dropout: per-row drop probability; 0 disables masking.
    scale: optional tensor broadcast-multiplied onto the (masked) weights.
  Returns:
    Embedding tensor shaped like `words` plus a trailing embedding dim.
  """
  if dropout:
    mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
    mask = Variable(mask)
    masked_embed_weight = mask * embed.weight
  else:
    masked_embed_weight = embed.weight
  if scale:
    masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight

  # Fix: embed._backend.Embedding was a private API removed in PyTorch 1.0.
  # The functional API is the supported equivalent and accepts
  # padding_idx=None directly (the old -1 substitution is unnecessary).
  X = nn.functional.embedding(words, masked_embed_weight,
    embed.padding_idx, embed.max_norm, embed.norm_type,
    embed.scale_grad_by_freq, embed.sparse
  )
  return X
class LockedDropout(nn.Module):
  """Variational dropout: one (batch, feature) mask shared across all time steps."""

  def __init__(self):
    super(LockedDropout, self).__init__()

  def forward(self, x, dropout=0.5):
    # Identity in eval mode or when dropout is disabled.
    if not self.training or not dropout:
      return x
    keep = 1 - dropout
    # Sample a single time-step mask (1, B, D) and broadcast it over time.
    noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(keep)
    mask = Variable(noise.div_(keep), requires_grad=False).expand_as(x)
    return mask * x
def mask2d(B, D, keep_prob, cuda=True):
  """Bernoulli keep-mask of shape (B, D), scaled by 1/keep_prob (inverted dropout)."""
  # floor(uniform + keep_prob) is 1 with probability keep_prob, else 0.
  keep = torch.floor(torch.rand(B, D) + keep_prob) / keep_prob
  keep = Variable(keep, requires_grad=False)
  return keep.cuda() if cuda else keep
| 2,955 | 30.446809 | 137 | py |
darts | darts-master/rnn/model.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from genotypes import STEPS
from utils import mask2d
from utils import LockedDropout
from utils import embedded_dropout
from torch.autograd import Variable
# Uniform init range shared by embeddings, decoder, and cell weight matrices.
INITRANGE = 0.04

class DARTSCell(nn.Module):
  """Recurrent cell whose internal wiring is given by a fixed Genotype.

  Each time step first mixes the input with the previous hidden state
  (highway-style), then applies the genotype's sequence of
  (activation, predecessor) nodes; the output is the mean of the nodes
  listed in genotype.concat.
  """

  def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):
    super(DARTSCell, self).__init__()
    self.nhid = nhid
    self.dropouth = dropouth
    self.dropoutx = dropoutx
    self.genotype = genotype

    # genotype is None when doing arch search
    steps = len(self.genotype.recurrent) if self.genotype is not None else STEPS
    # _W0 mixes [x, h_prev] into the initial state; _Ws[i] produces the
    # (gate, candidate) pair for intermediate node i.
    self._W0 = nn.Parameter(torch.Tensor(ninp+nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE))
    self._Ws = nn.ParameterList([
        nn.Parameter(torch.Tensor(nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE)) for i in range(steps)
    ])

  def forward(self, inputs, hidden):
    """Run the cell over a (T, B, ninp) sequence; returns (all states, last state)."""
    T, B = inputs.size(0), inputs.size(1)

    if self.training:
      # One dropout mask per sequence (variational dropout), reused each step.
      x_mask = mask2d(B, inputs.size(2), keep_prob=1.-self.dropoutx)
      h_mask = mask2d(B, hidden.size(2), keep_prob=1.-self.dropouth)
    else:
      x_mask = h_mask = None

    hidden = hidden[0]
    hiddens = []
    for t in range(T):
      hidden = self.cell(inputs[t], hidden, x_mask, h_mask)
      hiddens.append(hidden)
    hiddens = torch.stack(hiddens)
    return hiddens, hiddens[-1].unsqueeze(0)

  def _compute_init_state(self, x, h_prev, x_mask, h_mask):
    # Gated blend of the new input into the previous hidden state:
    # s0 = h_prev + c0 * (h0 - h_prev).
    if self.training:
      xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)
    else:
      xh_prev = torch.cat([x, h_prev], dim=-1)
    c0, h0 = torch.split(xh_prev.mm(self._W0), self.nhid, dim=-1)
    c0 = c0.sigmoid()
    h0 = h0.tanh()
    s0 = h_prev + c0 * (h0-h_prev)
    return s0

  def _get_activation(self, name):
    # Map a primitive name from the genotype to its activation function.
    if name == 'tanh':
      f = F.tanh
    elif name == 'relu':
      f = F.relu
    elif name == 'sigmoid':
      f = F.sigmoid
    elif name == 'identity':
      f = lambda x: x
    else:
      raise NotImplementedError
    return f

  def cell(self, x, h_prev, x_mask, h_mask):
    """One time step: build s0, then evaluate each genotype node in order."""
    s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)

    states = [s0]
    for i, (name, pred) in enumerate(self.genotype.recurrent):
      s_prev = states[pred]
      if self.training:
        ch = (s_prev * h_mask).mm(self._Ws[i])
      else:
        ch = s_prev.mm(self._Ws[i])
      c, h = torch.split(ch, self.nhid, dim=-1)
      c = c.sigmoid()
      fn = self._get_activation(name)
      h = fn(h)
      # Highway-style node update gated by c.
      s = s_prev + c * (h-s_prev)
      states += [s]
    # Cell output: mean of the concat nodes named by the genotype.
    output = torch.mean(torch.stack([states[i] for i in self.genotype.concat], -1), -1)
    return output
class RNNModel(nn.Module):
  """Container module with an encoder, a recurrent module, and a decoder.

  Embedding and decoder weights are tied (requires ninp == nhid == nhidlast).
  `cell_cls` selects the recurrent cell: DARTSCell with a fixed genotype for
  evaluation, or a search cell subclass with genotype=None.
  """

  def __init__(self, ntoken, ninp, nhid, nhidlast,
               dropout=0.5, dropouth=0.5, dropoutx=0.5, dropouti=0.5, dropoute=0.1,
               cell_cls=DARTSCell, genotype=None):
    super(RNNModel, self).__init__()
    self.lockdrop = LockedDropout()
    self.encoder = nn.Embedding(ntoken, ninp)

    # Weight tying below requires all three sizes to match.
    assert ninp == nhid == nhidlast
    if cell_cls == DARTSCell:
      assert genotype is not None
      self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx, genotype)]
    else:
      assert genotype is None
      self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx)]

    self.rnns = torch.nn.ModuleList(self.rnns)
    self.decoder = nn.Linear(ninp, ntoken)
    # Tie decoder and encoder weights.
    self.decoder.weight = self.encoder.weight
    self.init_weights()

    self.ninp = ninp
    self.nhid = nhid
    self.nhidlast = nhidlast
    self.dropout = dropout
    self.dropouti = dropouti
    self.dropoute = dropoute
    self.ntoken = ntoken
    self.cell_cls = cell_cls

  def init_weights(self):
    # Small uniform init; decoder.weight is shared with encoder.weight.
    self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)
    self.decoder.bias.data.fill_(0)
    self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)

  def forward(self, input, hidden, return_h=False):
    """Map (T, B) token ids to (T, B, ntoken) log-probabilities.

    Returns (log_probs, new_hidden) or, with return_h=True, additionally the
    raw and dropped RNN outputs used by activation regularization.
    """
    batch_size = input.size(1)

    # Word-level embedding dropout (training only), then input dropout.
    emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
    emb = self.lockdrop(emb, self.dropouti)

    raw_output = emb
    new_hidden = []
    raw_outputs = []
    outputs = []
    for l, rnn in enumerate(self.rnns):
      current_input = raw_output
      raw_output, new_h = rnn(raw_output, hidden[l])
      new_hidden.append(new_h)
      raw_outputs.append(raw_output)
    hidden = new_hidden

    output = self.lockdrop(raw_output, self.dropout)
    outputs.append(output)

    logit = self.decoder(output.view(-1, self.ninp))
    log_prob = nn.functional.log_softmax(logit, dim=-1)
    model_output = log_prob
    model_output = model_output.view(-1, batch_size, self.ntoken)

    if return_h:
      return model_output, hidden, raw_outputs, outputs
    return model_output, hidden

  def init_hidden(self, bsz):
    # Zero state shaped (1, bsz, nhid), on the same device/dtype as the params.
    weight = next(self.parameters()).data
    return [Variable(weight.new(1, bsz, self.nhid).zero_())]
| 5,148 | 30.981366 | 102 | py |
darts | darts-master/rnn/data.py | import os
import torch
from collections import Counter
class Dictionary(object):
  """Bidirectional word<->index map with per-token occurrence counts."""

  def __init__(self):
    self.word2idx = {}
    self.idx2word = []
    self.counter = Counter()
    self.total = 0

  def add_word(self, word):
    """Register *word* (assigning a fresh id if new), count it, return its id."""
    if word not in self.word2idx:
      self.idx2word.append(word)
      self.word2idx[word] = len(self.idx2word) - 1
    token_id = self.word2idx[word]
    self.counter[token_id] += 1
    self.total += 1
    return token_id

  def __len__(self):
    return len(self.idx2word)
class Corpus(object):
  """Loads train/valid/test token streams from a directory of text files."""

  def __init__(self, path):
    self.dictionary = Dictionary()
    self.train = self.tokenize(os.path.join(path, 'train.txt'))
    self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
    self.test = self.tokenize(os.path.join(path, 'test.txt'))

  def tokenize(self, path):
    """Tokenizes a text file."""
    assert os.path.exists(path)
    # Pass 1: grow the dictionary and count tokens (an <eos> closes each line).
    token_count = 0
    with open(path, 'r', encoding='utf-8') as f:
      for line in f:
        words = line.split() + ['<eos>']
        token_count += len(words)
        for word in words:
          self.dictionary.add_word(word)

    # Pass 2: encode the whole file as one flat LongTensor of word ids.
    ids = torch.LongTensor(token_count)
    with open(path, 'r', encoding='utf-8') as f:
      pos = 0
      for line in f:
        for word in line.split() + ['<eos>']:
          ids[pos] = self.dictionary.word2idx[word]
          pos += 1
    return ids
class SentCorpus(object):
  """Like Corpus, but keeps each line as its own tensor of word ids."""

  def __init__(self, path):
    self.dictionary = Dictionary()
    self.train = self.tokenize(os.path.join(path, 'train.txt'))
    self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
    self.test = self.tokenize(os.path.join(path, 'test.txt'))

  def tokenize(self, path):
    """Tokenizes a text file."""
    assert os.path.exists(path)
    # Pass 1: populate the dictionary (an <eos> closes each line).
    with open(path, 'r', encoding='utf-8') as f:
      tokens = 0
      for line in f:
        words = line.split() + ['<eos>']
        tokens += len(words)
        for word in words:
          self.dictionary.add_word(word)

    # Pass 2: encode one LongTensor per (non-empty) line.
    sents = []
    with open(path, 'r', encoding='utf-8') as f:
      for line in f:
        if not line:
          continue
        words = line.split() + ['<eos>']
        sent = torch.LongTensor(len(words))
        for pos, word in enumerate(words):
          sent[pos] = self.dictionary.word2idx[word]
        sents.append(sent)
    return sents
class BatchSentLoader(object):
  """Iterates length-sorted sentences as padded (max_len, batch) LongTensors."""

  def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):
    self.sents = sents
    self.batch_size = batch_size
    # Group similar lengths together to minimize padding per batch.
    self.sort_sents = sorted(sents, key=lambda s: s.size(0))
    self.cuda = cuda
    self.volatile = volatile
    self.pad_id = pad_id

  def __next__(self):
    remaining = len(self.sort_sents) - self.idx
    if remaining <= 0:
      raise StopIteration

    take = min(self.batch_size, remaining)
    batch = self.sort_sents[self.idx:self.idx + take]
    width = max(s.size(0) for s in batch)
    # Columns are sentences; rows beyond a sentence's length stay pad_id.
    out = torch.LongTensor(width, take).fill_(self.pad_id)
    for col, sent in enumerate(batch):
      out[:sent.size(0), col].copy_(sent)
    if self.cuda:
      out = out.cuda()

    self.idx += take
    return out

  next = __next__

  def __iter__(self):
    self.idx = 0
    return self
if __name__ == '__main__':
  # Smoke test: expects a PTB-style corpus (train/valid/test.txt) at ../penn.
  corpus = SentCorpus('../penn')
  loader = BatchSentLoader(corpus.test, 10)
  for i, d in enumerate(loader):
    print(i, d.size())
| 4,005 | 30.054264 | 80 | py |
darts | darts-master/rnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from genotypes import PRIMITIVES, STEPS, CONCAT, Genotype
from torch.autograd import Variable
from collections import namedtuple
from model import DARTSCell, RNNModel
class DARTSCellSearch(DARTSCell):
  """Search-time cell: a continuous (softmax-weighted) mixture over primitives.

  Instead of a fixed genotype, every candidate edge/activation is evaluated
  and blended with weights softmax(self.weights); `self.weights` is attached
  externally by RNNModelSearch so all cells share one architecture encoding.
  """

  def __init__(self, ninp, nhid, dropouth, dropoutx):
    super(DARTSCellSearch, self).__init__(ninp, nhid, dropouth, dropoutx, genotype=None)
    # Non-affine BN stabilizes node statistics during the search.
    self.bn = nn.BatchNorm1d(nhid, affine=False)

  def cell(self, x, h_prev, x_mask, h_mask):
    s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
    s0 = self.bn(s0)
    probs = F.softmax(self.weights, dim=-1)

    # `offset` walks the flattened (node, predecessor) rows of the alpha matrix.
    offset = 0
    states = s0.unsqueeze(0)
    for i in range(STEPS):
      if self.training:
        masked_states = states * h_mask.unsqueeze(0)
      else:
        masked_states = states
      # Compute (gate, candidate) for node i against ALL previous states at once.
      ch = masked_states.view(-1, self.nhid).mm(self._Ws[i]).view(i+1, -1, 2*self.nhid)
      c, h = torch.split(ch, self.nhid, dim=-1)
      c = c.sigmoid()

      # Mix every non-'none' activation of every predecessor, weighted by alphas.
      s = torch.zeros_like(s0)
      for k, name in enumerate(PRIMITIVES):
        if name == 'none':
          continue
        fn = self._get_activation(name)
        unweighted = states + c * (fn(h) - states)
        s += torch.sum(probs[offset:offset+i+1, k].unsqueeze(-1).unsqueeze(-1) * unweighted, dim=0)
      s = self.bn(s)
      states = torch.cat([states, s.unsqueeze(0)], 0)
      offset += i+1
    # Output: mean over the last CONCAT intermediate states.
    output = torch.mean(states[-CONCAT:], dim=0)
    return output
class RNNModelSearch(RNNModel):
  """RNNModel variant used during architecture search.

  Owns the shared architecture parameters (alphas), can clone itself for the
  unrolled bilevel step, and can decode its alphas into a discrete Genotype.
  """

  def __init__(self, *args):
    super(RNNModelSearch, self).__init__(*args, cell_cls=DARTSCellSearch, genotype=None)
    # Keep ctor args so new() can build an identically-shaped clone.
    self._args = args
    self._initialize_arch_parameters()

  def new(self):
    # Fresh model with the same architecture encoding (weights are re-initialized).
    model_new = RNNModelSearch(*self._args)
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
      x.data.copy_(y.data)
    return model_new

  def _initialize_arch_parameters(self):
    # One alpha row per (node, predecessor) edge: 1 + 2 + ... + STEPS rows.
    k = sum(i for i in range(1, STEPS+1))
    weights_data = torch.randn(k, len(PRIMITIVES)).mul_(1e-3)
    self.weights = Variable(weights_data.cuda(), requires_grad=True)
    self._arch_parameters = [self.weights]
    # Share the single alpha tensor with every search cell.
    for rnn in self.rnns:
      rnn.weights = self.weights

  def arch_parameters(self):
    return self._arch_parameters

  def _loss(self, hidden, input, target):
    # Convenience wrapper returning (NLL loss, next hidden state).
    log_prob, hidden_next = self(input, hidden, return_h=False)
    loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), target)
    return loss, hidden_next

  def genotype(self):
    """Discretize the current alphas into a Genotype (best edge per node)."""

    def _parse(probs):
      gene = []
      start = 0
      for i in range(STEPS):
        end = start + i + 1
        W = probs[start:end].copy()
        # Pick the predecessor whose strongest non-'none' primitive is largest...
        j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[0]
        k_best = None
        # ...then the best non-'none' primitive on that edge.
        for k in range(len(W[j])):
          if k != PRIMITIVES.index('none'):
            if k_best is None or W[j][k] > W[j][k_best]:
              k_best = k
        gene.append((PRIMITIVES[k_best], j))
        start = end
      return gene

    gene = _parse(F.softmax(self.weights, dim=-1).data.cpu().numpy())
    genotype = Genotype(recurrent=gene, concat=range(STEPS+1)[-CONCAT:])
    return genotype
| 3,278 | 32.804124 | 125 | py |
darts | darts-master/rnn/train_search.py | import argparse
import os, sys, glob
import time
import math
import numpy as np
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from architect import Architect
import gc
import data
import model_search as model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=300,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=3,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_wdecay', type=float, default=1e-3,
help='weight decay for the architecture encoding alpha')
parser.add_argument('--arch_lr', type=float, default=3e-3,
help='learning rate for the architecture encoding alpha')
args = parser.parse_args()
# Resolve sentinel defaults: last-layer width falls back to the embedding
# size, and the accumulation micro-batch falls back to the full batch.
if args.nhidlast < 0:
    args.nhidlast = args.emsize
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size

# Fresh runs get a timestamped experiment directory with a snapshot of the scripts.
if not args.continue_train:
    args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
    create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log both to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.set_device(args.gpu)
        cudnn.benchmark = True
        cudnn.enabled=True
        torch.cuda.manual_seed_all(args.seed)

# Training data uses the full batch size; search (arch) data reuses the
# validation split at the same batch size.
corpus = data.Corpus(args.data)

eval_batch_size = 10
test_batch_size = 1

train_data = batchify(corpus.train, args.batch_size, args)
search_data = batchify(corpus.valid, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)

ntokens = len(corpus.dictionary)
if args.continue_train:
    model = torch.load(os.path.join(args.save, 'model.pt'))
else:
    # NOTE: rebinding `model` shadows the imported `model` module from here on.
    model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast,
                       args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)

size = 0
for p in model.parameters():
    size += p.nelement()
logging.info('param size: {}'.format(size))
logging.info('initial genotype:')
logging.info(model.genotype())

if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model

architect = Architect(parallel_model, args)

total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def evaluate(data_source, batch_size=10):
  """Return the average per-token NLL of the (module-global) model on data_source.

  Relies on globals created at script level: model, parallel_model, corpus, args.
  """
  # Turn on evaluation mode which disables dropout.
  model.eval()
  total_loss = 0
  ntokens = len(corpus.dictionary)
  hidden = model.init_hidden(batch_size)
  for i in range(0, data_source.size(0) - 1, args.bptt):
    data, targets = get_batch(data_source, i, args, evaluation=True)
    targets = targets.view(-1)
    log_prob, hidden = parallel_model(data, hidden)
    # Mean NLL of this chunk, weighted below by chunk length.
    loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
    total_loss += loss * len(data)
    # Detach the hidden state so BPTT does not span evaluation chunks.
    hidden = repackage_hidden(hidden)
  # NOTE(review): total_loss[0] indexes a 1-element tensor (pre-0.4 PyTorch
  # idiom); on modern PyTorch this would be total_loss.item() — confirm the
  # targeted torch version.
  return total_loss[0] / len(data_source)
def train():
    """Run one epoch of DARTS bilevel search.

    Per mini-batch this alternates (1) an architecture-parameter step on a
    held-out "search" batch via ``architect.step``, then (2) a weight step
    on the training batch with activation (alpha) and temporal (beta)
    regularizers. Relies on module-level globals: model, parallel_model,
    architect, optimizer, train_data, search_data, args, epoch.
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'

    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)  # NOTE(review): unused in this function
    # One hidden state per small-batch slice, for both train and search streams.
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        # Occasionally halve the unroll length (cheap sequence-length jitter).
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        # seq_len = max(5, int(np.random.normal(bptt, 5)))
        # # There's a very small chance that it could select a very long sequence length resulting in OOM
        # seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
        seq_len = int(bptt)

        # Scale the lr to the actual unroll length; restored after the step.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()

        # The validation batch drives the architecture update; index wraps
        # around the end of the search split.
        data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)

        optimizer.zero_grad()

        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
            cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1)

            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])
            hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])

            # Architecture (alpha) update on the held-out batch.
            hidden_valid[s_id], grad_norm = architect.step(
                    hidden[s_id], cur_data, cur_targets,
                    hidden_valid[s_id], cur_data_valid, cur_targets_valid,
                    optimizer,
                    args.unrolled)

            # assuming small_batch_size = batch_size so we don't accumulate gradients
            optimizer.zero_grad()
            hidden[s_id] = repackage_hidden(hidden[s_id])

            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)

            loss = raw_loss
            # Activiation Regularization
            if args.alpha > 0:
                loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness)
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Scale so gradients accumulated over slices sum to a full-batch step.
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()

            s_id += 1
            start = end
            end = start + args.small_batch_size

            gc.collect()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()

        # total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            logging.info(parallel_model.genotype())
            print(F.softmax(parallel_model.weights, dim=-1))
            # total_loss is a 1-element tensor under this PyTorch version.
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        batch += 1
        i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000

# (Re)build the weight optimizer. When resuming, pick ASGD vs SGD based on
# whether the saved optimizer state carries ASGD's 't0' averaging key, then
# restore its state.
if args.continue_train:
    optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
    if 't0' in optimizer_state['param_groups'][0]:
        optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
    optimizer.load_state_dict(optimizer_state)
else:
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)

for epoch in range(1, args.epochs+1):
    epoch_start_time = time.time()
    train()

    val_loss = evaluate(val_data, eval_batch_size)
    logging.info('-' * 89)
    logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
            'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                       val_loss, math.exp(val_loss)))
    logging.info('-' * 89)

    # Checkpoint whenever validation improves on the best loss seen so far.
    if val_loss < stored_loss:
        save_checkpoint(model, optimizer, epoch, args.save)
        logging.info('Saving Normal!')
        stored_loss = val_loss

    best_val_loss.append(val_loss)
| 12,639 | 43.041812 | 132 | py |
darts | darts-master/rnn/train.py | import os
import gc
import sys
import glob
import time
import math
import numpy as np
import torch
import torch.nn as nn
import logging
import argparse
import genotypes
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import data
import model
from torch.autograd import Variable
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
# --- Command-line interface ----------------------------------------------
# NOTE: --cuda and --single_gpu use action='store_false' with truthy
# defaults, so passing the flag *disables* the feature.
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
                    help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=850,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=850,
                    help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=850,
                    help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
                    help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
                    help='dropout for input nodes rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
                    help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1267,
                    help='random seed')
# NOTE(review): the help text below looks copy-pasted; judging by its use in
# the training driver, --nonmono is the non-monotone window that triggers
# the switch to ASGD, not a seed.
parser.add_argument('--nonmono', type=int, default=5,
                    help='random seed')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--save', type=str, default='EXP',
                    help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
                    help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
                    help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=8e-7,
                    help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
                    help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
                    help='the batch size for computation. batch_size should be divisible by small_batch_size.\
                     In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
                     until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
                    help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
                    help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()

# Negative sentinels mean "inherit": nhidlast defaults to emsize,
# small_batch_size to batch_size (i.e. no gradient accumulation).
if args.nhidlast < 0:
    args.nhidlast = args.emsize
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size

# Fresh runs get a timestamped experiment directory with a copy of the code.
if not args.continue_train:
    args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
    create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log to both stdout and <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.set_device(args.gpu)
        cudnn.benchmark = True
        cudnn.enabled=True
        torch.cuda.manual_seed_all(args.seed)

corpus = data.Corpus(args.data)

eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)

ntokens = len(corpus.dictionary)
if args.continue_train:
    model = torch.load(os.path.join(args.save, 'model.pt'))
else:
    # Look up the fixed (already searched) architecture by name, then build
    # the evaluation RNN around it. NOTE: this rebinds the imported `model`
    # module to the model instance.
    genotype = eval("genotypes.%s" % args.arch)
    model = model.RNNModel(ntokens, args.emsize, args.nhid, args.nhidlast,
                           args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute,
                           cell_cls=model.DARTSCell, genotype=genotype)

if args.cuda:
    if args.single_gpu:
        parallel_model = model.cuda()
    else:
        parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
    parallel_model = model
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
# NOTE(review): `genotype` is only bound in the else-branch above, so running
# with --continue_train will raise NameError on the next line — confirm.
logging.info('Genotype: {}'.format(genotype))
def evaluate(data_source, batch_size=10):
    """Return the mean per-token NLL over `data_source`.

    Runs the module-level `model` in eval mode (dropout disabled), stepping
    through the batchified corpus in chunks of `args.bptt` tokens and
    carrying the hidden state across chunks (detached every step).

    Args:
        data_source: batchified token tensor of shape (num_steps, batch_size).
        batch_size (int): batch size `data_source` was batchified with.

    Returns:
        float-like scalar: average negative log-likelihood per token.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = parallel_model(data, hidden)
        # nll_loss averages over tokens; re-weight by chunk length so the
        # final division yields a true per-token average.
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        # Detach so evaluation never builds an ever-growing autograd graph.
        hidden = repackage_hidden(hidden)
    # total_loss is a 1-element tensor under this (legacy) PyTorch version.
    return total_loss[0] / len(data_source)
def train():
    """Run one epoch of language-model training on `train_data`.

    Splits each batch into `small_batch_size` slices to accumulate
    gradients, applies activation (alpha) and temporal (beta)
    regularization, clips gradients, and logs every `log_interval`
    batches. Relies on module-level globals: model, parallel_model,
    optimizer, train_data, args, epoch.
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'

    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)  # NOTE(review): unused in this function
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        # Occasionally halve the unroll length (sequence-length jitter).
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)

        # Scale the lr to the actual unroll length; restored after the step.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)

        optimizer.zero_grad()

        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)

            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])

            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)

            loss = raw_loss
            # Activiation Regularization
            if args.alpha > 0:
                loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness)
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Scale so gradients accumulated over slices sum to a full-batch step.
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()

            s_id += 1
            start = end
            end = start + args.small_batch_size

            gc.collect()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()

        # total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2

        # Deliberate abort on divergence: a bare `raise` with no active
        # exception still aborts (RuntimeError), which the driver's except
        # clause catches to roll back to the last good checkpoint.
        if np.isnan(total_loss[0]):
            raise
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        batch += 1
        i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000

# At any point you can hit Ctrl + C to break out of training early.
try:
    # (Re)build the optimizer; when resuming, pick ASGD vs SGD based on
    # whether the saved state carries ASGD's 't0' averaging key.
    if args.continue_train:
        optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
        if 't0' in optimizer_state['param_groups'][0]:
            optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
        else:
            optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
        optimizer.load_state_dict(optimizer_state)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)

    epoch = 1
    while epoch < args.epochs + 1:
        epoch_start_time = time.time()
        try:
            train()
        except:
            # train() aborts on NaN loss; roll back model/optimizer/epoch to
            # the last saved checkpoint and retry.
            logging.info('rolling back to the previous best model ...')
            model = torch.load(os.path.join(args.save, 'model.pt'))
            parallel_model = model.cuda()

            optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
            if 't0' in optimizer_state['param_groups'][0]:
                optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
            else:
                optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
            optimizer.load_state_dict(optimizer_state)

            epoch = torch.load(os.path.join(args.save, 'misc.pt'))['epoch']
            continue

        if 't0' in optimizer.param_groups[0]:
            # ASGD phase: validate with the *averaged* weights ('ax'), then
            # restore the raw weights afterwards.
            tmp = {}
            for prm in model.parameters():
                tmp[prm] = prm.data.clone()
                prm.data = optimizer.state[prm]['ax'].clone()

            val_loss2 = evaluate(val_data)
            logging.info('-' * 89)
            logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                    'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                               val_loss2, math.exp(val_loss2)))
            logging.info('-' * 89)

            if val_loss2 < stored_loss:
                save_checkpoint(model, optimizer, epoch, args.save)
                logging.info('Saving Averaged!')
                stored_loss = val_loss2

            for prm in model.parameters():
                prm.data = tmp[prm].clone()

        else:
            # SGD phase.
            val_loss = evaluate(val_data, eval_batch_size)
            logging.info('-' * 89)
            logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                    'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                               val_loss, math.exp(val_loss)))
            logging.info('-' * 89)

            if val_loss < stored_loss:
                save_checkpoint(model, optimizer, epoch, args.save)
                logging.info('Saving Normal!')
                stored_loss = val_loss

            # Non-monotone trigger: switch SGD -> ASGD once validation stops
            # improving over the last `nonmono` epochs.
            if 't0' not in optimizer.param_groups[0] and (len(best_val_loss)>args.nonmono and val_loss > min(best_val_loss[:-args.nonmono])):
                logging.info('Switching!')
                optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
            best_val_loss.append(val_loss)
        epoch += 1

except KeyboardInterrupt:
    logging.info('-' * 89)
    logging.info('Exiting from training early')

# Load the best saved model.
model = torch.load(os.path.join(args.save, 'model.pt'))
parallel_model = model.cuda()

# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging.info('=' * 89)
| 13,900 | 42.037152 | 141 | py |
darts | darts-master/rnn/visualize.py | import sys
import genotypes
from graphviz import Digraph
def plot(genotype, filename):
    """Render a recurrent-cell genotype as a left-to-right graph and write
    it to <filename>.pdf (opening a viewer)."""
    node_style = dict(style='filled', shape='rect', align='center',
                      fontsize='20', height='0.5', width='0.5',
                      penwidth='2', fontname="times")
    graph = Digraph(
        format='pdf',
        edge_attr=dict(fontsize='20', fontname="times"),
        node_attr=node_style,
        engine='dot')
    graph.body.extend(['rankdir=LR'])

    # Inputs: token embedding and previous hidden state both feed node 0.
    graph.node("x_{t}", fillcolor='darkseagreen2')
    graph.node("h_{t-1}", fillcolor='darkseagreen2')
    graph.node("0", fillcolor='lightblue')
    graph.edge("x_{t}", "0", fillcolor="gray")
    graph.edge("h_{t-1}", "0", fillcolor="gray")

    # One intermediate node per genotype entry, each fed by its
    # (operation, predecessor-index) pair.
    steps = len(genotype)
    for node_id in range(1, steps + 1):
        graph.node(str(node_id), fillcolor='lightblue')
    for edge_id, (op, pred) in enumerate(genotype):
        graph.edge(str(pred), str(edge_id + 1), label=op, fillcolor="gray")

    # Every intermediate node contributes to the output h_t.
    graph.node("h_{t}", fillcolor='palegoldenrod')
    for node_id in range(1, steps + 1):
        graph.edge(str(node_id), "h_{t}", fillcolor="gray")

    graph.render(filename, view=True)
if __name__ == '__main__':
    # Usage: python visualize.py ARCH_NAME, where ARCH_NAME is an attribute
    # of the genotypes module (e.g. DARTS, ENAS).
    if len(sys.argv) != 2:
        print("usage:\n python {} ARCH_NAME".format(sys.argv[0]))
        sys.exit(1)

    genotype_name = sys.argv[1]
    # getattr instead of eval: raises the same AttributeError on a missing
    # name, without evaluating arbitrary command-line text.
    try:
        genotype = getattr(genotypes, genotype_name)
    except AttributeError:
        print("{} is not specified in genotypes.py".format(genotype_name))
        sys.exit(1)

    plot(genotype.recurrent, "recurrent")
| 1,327 | 26.666667 | 141 | py |
darts | darts-master/rnn/genotypes.py | from collections import namedtuple
# A searched recurrent cell: `recurrent` lists one (activation, input-node
# index) pair per intermediate node; `concat` lists the node indices whose
# outputs form the cell output.
Genotype = namedtuple('Genotype', 'recurrent concat')

# Candidate operations assignable to each edge during architecture search.
PRIMITIVES = [
    'none',
    'tanh',
    'relu',
    'sigmoid',
    'identity'
]
# Number of intermediate nodes in the cell / size of the output concat.
STEPS = 8
CONCAT = 8

# Baseline cell from ENAS, expressed in the same Genotype format.
ENAS = Genotype(
    recurrent = [
        ('tanh', 0),
        ('tanh', 1),
        ('relu', 1),
        ('tanh', 3),
        ('tanh', 3),
        ('relu', 3),
        ('relu', 4),
        ('relu', 7),
        ('relu', 8),
        ('relu', 8),
        ('relu', 8),
    ],
    concat = [2, 5, 6, 9, 10, 11]
)

# Cells found by first- and second-order DARTS; DARTS is the published default.
DARTS_V1 = Genotype(recurrent=[('relu', 0), ('relu', 1), ('tanh', 2), ('relu', 3), ('relu', 4), ('identity', 1), ('relu', 5), ('relu', 1)], concat=range(1, 9))
DARTS_V2 = Genotype(recurrent=[('sigmoid', 0), ('relu', 1), ('relu', 1), ('identity', 1), ('tanh', 2), ('sigmoid', 5), ('tanh', 3), ('relu', 5)], concat=range(1, 9))

DARTS = DARTS_V2
| 851 | 22.027027 | 165 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
    """Collect the C++ (and, when CUDA is usable, CUDA) sources under
    maskrcnn_benchmark/csrc and wrap them in a single extension module
    named ``maskrcnn_benchmark._C``."""
    here = os.path.dirname(os.path.abspath(__file__))
    csrc_dir = os.path.join(here, "maskrcnn_benchmark", "csrc")

    main_sources = glob.glob(os.path.join(csrc_dir, "*.cpp"))
    cpu_sources = glob.glob(os.path.join(csrc_dir, "cpu", "*.cpp"))
    cuda_sources = glob.glob(os.path.join(csrc_dir, "cuda", "*.cu"))

    sources = main_sources + cpu_sources
    ext_cls = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []

    # Compile the CUDA kernels only when both a device and the toolkit exist.
    if torch.cuda.is_available() and CUDA_HOME is not None:
        # if True:
        ext_cls = CUDAExtension
        sources = sources + cuda_sources
        define_macros = define_macros + [("WITH_CUDA", None)]
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]

    # glob already yielded absolute paths, so joining with csrc_dir is a
    # no-op kept for parity with the original layout.
    sources = [os.path.join(csrc_dir, src) for src in sources]

    return [
        ext_cls(
            "maskrcnn_benchmark._C",
            sources,
            include_dirs=[csrc_dir],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
# Standard setuptools entry point: packages the Python code and builds the
# C++/CUDA extension returned by get_extensions() with torch's BuildExtension.
setup(
    name="maskrcnn_benchmark",
    version="0.1",
    author="fmassa",
    url="https://github.com/facebookresearch/maskrnn-benchmark",
    description="object detection in pytorch",
    packages=find_packages(exclude=("configs", "examples", "test",)),
    # install_requires=requirements,
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 2,068 | 28.140845 | 73 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/tools/convert_dataset.py | import os
import numpy as np
import cv2
from shapely.geometry import box, Polygon
from shapely import affinity
import math
def _rect2quad(boxes):
x_min, y_min, x_max, y_max = boxes[:, 0].reshape((-1, 1)), boxes[:, 1].reshape((-1, 1)), boxes[:, 2].reshape((-1, 1)), boxes[:, 3].reshape((-1, 1))
return np.hstack((x_min, y_min, x_max, y_min, x_max, y_max, x_min, y_max))
def _quad2rect(boxes):
## only support rectangle
return np.hstack((boxes[:, 0].reshape((-1, 1)), boxes[:, 1].reshape((-1, 1)), boxes[:, 4].reshape((-1, 1)), boxes[:, 5].reshape((-1, 1))))
def _quad2minrect(boxes):
## trans a quad(N*4) to a rectangle(N*4) which has miniual area to cover it
return np.hstack((boxes[:, ::2].min(axis=1).reshape((-1, 1)), boxes[:, 1::2].min(axis=1).reshape((-1, 1)), boxes[:, ::2].max(axis=1).reshape((-1, 1)), boxes[:, 1::2].max(axis=1).reshape((-1, 1))))
def _quad2boxlist(boxes):
res = []
for i in range(boxes.shape[0]):
res.append([[boxes[i][0], boxes[i][1]], [boxes[i][2], boxes[i][3]], [boxes[i][4], boxes[i][5]], [boxes[i][6], boxes[i][7]]])
return res
def _boxlist2quads(boxlist):
res = np.zeros((len(boxlist), 8))
for i, box in enumerate(boxlist):
# print(box)
res[i] = np.array([box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1]])
return res
def _rotate_image(im, polygons, angle):
    """Rotate `im` by `angle` degrees about its centre and transform
    `polygons` into the rotated frame.

    The image is first zero-padded to the bounding box of the rotated
    content so nothing is cropped; the same padding offsets (start_h,
    start_w) are applied to the polygons before rotation.

    Returns (rotated_image, rotated_polygons) where rotated_polygons is
    in the nested [[flat-coords]] format produced by _rotate_segms.
    """
    new_polygons = polygons
    ## rotate image first
    height, width, _ = im.shape
    ## get the minimal rect to cover the rotated image
    img_box = np.array([[0, 0, width, 0, width, height, 0, height]])
    # Note the sign flip: polygon rotation uses -angle while warpAffine
    # below uses +angle (image vs. coordinate-frame rotation directions).
    rotated_img_box = _quad2minrect(_rotate_polygons(img_box, -1*angle, (width/2, height/2)))
    r_height = int(max(rotated_img_box[0][3], rotated_img_box[0][1]) - min(rotated_img_box[0][3], rotated_img_box[0][1]))
    r_width = int(max(rotated_img_box[0][2], rotated_img_box[0][0]) - min(rotated_img_box[0][2], rotated_img_box[0][0]))
    r_height_padding = max(r_height, height)
    r_width_padding = max(r_width, width)

    ## padding im: centre the original image inside the padded canvas
    im_padding = np.zeros((r_height_padding, r_width_padding, 3))
    start_h, start_w = int((r_height_padding - height)/2.0), int((r_width_padding - width)/2.0)
    # start_h = max(start_h, 0)
    # start_w = max(start_w, 0)
    end_h, end_w = start_h + height, start_w + width
    # print(start_h, end_h, start_w, end_w, im.shape)
    im_padding[start_h:end_h, start_w:end_w, :] = im

    M = cv2.getRotationMatrix2D((r_width/2, r_height/2), angle, 1)
    im = cv2.warpAffine(im_padding, M, (r_width, r_height))

    ## polygons: shift by the padding offset, then rotate about the same centre
    new_polygons = _rotate_segms(polygons, -1*angle, (r_width/2, r_height/2), start_h, start_w)

    return im, new_polygons
def _rotate_polygons(polygons, angle, r_c):
    """Rotate quads (N, 8) by `angle` degrees about centre `r_c` (x, y).

    Uses shapely for the rotation; returns an (N, 8) array.
    """
    ## polygons: N*8
    ## r_x: rotate center x
    ## r_y: rotate center y
    ## angle: -15~15
    poly_list = _quad2boxlist(polygons)
    rotate_boxes_list = []
    for poly in poly_list:
        box = Polygon(poly)
        rbox = affinity.rotate(box, angle, r_c)
        # A valid rotated quad has 5 exterior coords (ring is closed, so
        # first point repeats at the end); anything less means the input
        # polygon was degenerate — log it for debugging.
        if len(list(rbox.exterior.coords))<5:
            print(poly)
            print(rbox)
        # assert(len(list(rbox.exterior.coords))>=5)
        # Drop the repeated closing point before re-flattening.
        rotate_boxes_list.append(rbox.boundary.coords[:-1])
    res = _boxlist2quads(rotate_boxes_list)
    return res
def _rotate_segms(polygons, angle, r_c, start_h, start_w):
    """Shift flat polygons by the padding offset (start_w, start_h), then
    rotate them by `angle` degrees about centre `r_c` (x, y).

    `polygons` is a list of flat coordinate lists [x1, y1, x2, y2, ...].
    Returns a list of one-element lists, each wrapping the rotated flat
    coordinate list (the segmentation format consumed downstream).
    """
    ## polygons: N*8
    ## r_x: rotate center x
    ## r_y: rotate center y
    ## angle: -15~15
    # Flat [x1, y1, ...] -> point pairs, shifted into the padded frame.
    poly_list=[]
    for polygon in polygons:
        tmp=[]
        for i in range(int(len(polygon) / 2)):
            tmp.append([polygon[2*i] + start_w, polygon[2*i+1] + start_h])
        poly_list.append(tmp)
    rotate_boxes_list = []
    for poly in poly_list:
        box = Polygon(poly)
        rbox = affinity.rotate(box, angle, r_c)
        # Degenerate input polygons yield fewer than 5 ring coords
        # (the closing point repeats); log them for debugging.
        if len(list(rbox.exterior.coords))<5:
            print(poly)
            print(rbox)
        # Drop the repeated closing point before re-flattening.
        rotate_boxes_list.append(rbox.boundary.coords[:-1])
    res = []
    for i, box in enumerate(rotate_boxes_list):
        tmp = []
        for point in box:
            tmp.append(point[0])
            tmp.append(point[1])
        res.append([tmp])
    return res
def _read_gt(gt_path):
polygons = []
words = []
with open(gt_path, 'r') as fid:
lines = fid.readlines()
for line in lines:
line = line.strip()
polygon = line.split(',')[:8]
word = line.split(',')[8]
polygon = [float(x) for x in polygon]
polygons.append(polygon)
words.append(word)
return polygons, words
def format_new_gt(polygons, words, new_gt_path):
    """Write (polygon, word) pairs to `new_gt_path`, one
    "x1,y1,...,word" line per instance.

    Each entry of `polygons` is a one-element list wrapping a flat
    coordinate list (the format _rotate_segms produces); coordinates are
    truncated to integers.
    """
    with open(new_gt_path, 'wt') as fid:
        lines = []
        for polygon, word in zip(polygons, words):
            coords = ','.join(str(int(value)) for value in polygon[0])
            lines.append('{},{}\n'.format(coords, word))
        fid.writelines(lines)
def visu_gt(img, polygons, visu_path):
    """Draw each polygon as a closed yellow outline on `img` and save the
    annotated image to `visu_path`."""
    for polygon in polygons:
        outline = np.array(polygon, np.int32).reshape((-1, 1, 2))
        cv2.polylines(img, [outline], True, (0, 255, 255))
    cv2.imwrite(visu_path, img)
# --- Script body: rotate the ICDAR2013 test set by `angle` degrees and
# write rotated images plus re-projected ground truth. ---------------------
img_dir = '../datasets/icdar2013/test_images'
gt_dir = '../datasets/icdar2013/test_gts'
angle = 45
new_img_dir = '../datasets/icdar2013/rotated_test_images'+'_'+str(angle)
new_gt_dir = '../datasets/icdar2013/rotated_test_gts'+'_'+str(angle)
if not os.path.isdir(new_img_dir):
    os.mkdir(new_img_dir)
if not os.path.isdir(new_gt_dir):
    os.mkdir(new_gt_dir)
visu_dir = '../output/visu/'
# ICDAR2013 test set has 233 images named img_1.jpg ... img_233.jpg.
for i in range(233):
    img_name = 'img_' + str(i+1) + '.jpg'
    img_path = os.path.join(img_dir, img_name)
    img = cv2.imread(img_path)
    gt_path = os.path.join(gt_dir, img_name + '.txt')
    new_img_path = os.path.join(new_img_dir, img_name)
    visu_path = os.path.join(visu_dir, img_name)
    new_gt_path = os.path.join(new_gt_dir, 'gt_' + img_name.split('.')[0] + '.txt')
    polygons, words = _read_gt(gt_path)
    # print(img_name)
    if angle == 90:
        # Exact 90° rotation via transpose + vertical flip; polygon coords
        # are remapped analytically.
        (h, w) = img.shape[:2]
        img = cv2.transpose(img)
        img = cv2.flip(img,flipCode=0)
        # M = cv2.getRotationMatrix2D(center, 90, 1)
        # img = cv2.warpAffine(img, M, (h, w))
        # NOTE(review): this branch yields *flat* coordinate lists, while
        # format_new_gt below indexes polygon[0] expecting the nested
        # [[...]] format returned by _rotate_image — confirm before using
        # angle == 90.
        new_polygons = [[polygon[1], w-polygon[0], polygon[3], w-polygon[2], polygon[5], w-polygon[4], polygon[7], w-polygon[6]] for polygon in polygons]
    else:
        img, new_polygons = _rotate_image(img, polygons, angle)
    format_new_gt(new_polygons, words, new_gt_path)
    # visu_gt(img, new_polygons, visu_path)
    cv2.imwrite(new_img_path, img)
| 6,623 | 36.213483 | 200 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/tools/test_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.text_inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logging import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
# Check if we can enable mixed-precision via apex.amp
try:
from apex import amp
except ImportError:
raise ImportError('Use APEX for mixed precision via apex.amp')
def main():
    """Entry point for evaluation.

    Parses CLI args, optionally initializes distributed (NCCL) inference
    when launched with torch.distributed (WORLD_SIZE > 1), builds the
    detector, restores weights from cfg.MODEL.WEIGHT, and runs text
    inference over every dataset in cfg.DATASETS.TEST, writing results
    under cfg.OUTPUT_DIR/inference/<dataset_name>.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="./configs/seq.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # torch.distributed.launch sets WORLD_SIZE; >1 process => distributed run.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.deprecated.init_process_group(
            backend="nccl", init_method="env://"
        )

    # Config: file first, then command-line overrides, then frozen.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = ""
    logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)

    # Initialize mixed-precision if necessary
    use_mixed_precision = cfg.DTYPE == 'float16'
    amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)

    checkpointer = DetectronCheckpointer(cfg, model)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    # One output folder per test dataset (None when OUTPUT_DIR is unset).
    output_folders = [None] * len(cfg.DATASETS.TEST)
    if cfg.OUTPUT_DIR:
        dataset_names = cfg.DATASETS.TEST
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    model_name = cfg.MODEL.WEIGHT.split('/')[-1]
    for output_folder, data_loader_val in zip(output_folders, data_loaders_val):
        inference(
            model,
            data_loader_val,
            iou_types=iou_types,
            box_only=cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
            model_name=model_name,
            cfg=cfg,
        )
        # Keep distributed workers in lockstep between datasets.
        synchronize()


if __name__ == "__main__":
    main()
| 3,686 | 34.451923 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/tools/demo.py | import os
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.chars import getstr_grid, get_tight_rect
from PIL import Image
import numpy as np
import argparse
class TextDemo(object):
    def __init__(
        self,
        cfg,
        confidence_threshold=0.7,
        min_image_size=224,
        output_polygon=True
    ):
        """Build the text-spotting model from `cfg` and prepare inference.

        Args:
            cfg: maskrcnn_benchmark config node (cloned; the original is
                still passed as-is to the model builder and checkpointer).
            confidence_threshold (float): detections scoring below this are
                dropped in compute_prediction.
            min_image_size (int): size the input's shorter side is resized
                to by the transform.
            output_polygon (bool): forwarded to mask2polygon — presumably
                selects full-polygon vs simpler output; confirm against
                mask2polygon's implementation.
        """
        self.cfg = cfg.clone()
        self.model = build_detection_model(cfg)
        self.model.eval()  # inference only
        self.device = torch.device(cfg.MODEL.DEVICE)
        self.model.to(self.device)
        self.min_image_size = min_image_size

        # Load the trained weights from cfg.MODEL.WEIGHT.
        checkpointer = DetectronCheckpointer(cfg, self.model)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        self.transforms = self.build_transform()
        self.cpu_device = torch.device("cpu")
        self.confidence_threshold = confidence_threshold
        self.output_polygon = output_polygon
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
result_polygons (list): detection results
result_words (list): recognition results
"""
result_polygons, result_words = self.compute_prediction(image)
return result_polygons, result_words
def compute_prediction(self, original_image):
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions, _, _ = self.model(image_list)
global_predictions = predictions[0]
char_predictions = predictions[1]
char_mask = char_predictions['char_mask']
char_boxes = char_predictions['boxes']
words, rec_scores = self.process_char_mask(char_mask, char_boxes)
seq_words = char_predictions['seq_outputs']
seq_scores = char_predictions['seq_scores']
global_predictions = [o.to(self.cpu_device) for o in global_predictions]
# always single image is passed at a time
global_prediction = global_predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
global_prediction = global_prediction.resize((width, height))
boxes = global_prediction.bbox.tolist()
scores = global_prediction.get_field("scores").tolist()
masks = global_prediction.get_field("mask").cpu().numpy()
result_polygons = []
result_words = []
for k, box in enumerate(boxes):
score = scores[k]
if score < self.confidence_threshold:
continue
box = list(map(int, box))
mask = masks[k,0,:,:]
polygon = self.mask2polygon(mask, box, original_image.shape, threshold=0.5, output_polygon=self.output_polygon)
if polygon is None:
polygon = [box[0], box[1], box[2], box[1], box[2], box[3], box[0], box[3]]
result_polygons.append(polygon)
word = words[k]
rec_score = rec_scores[k]
seq_word = seq_words[k]
seq_char_scores = seq_scores[k]
seq_score = sum(seq_char_scores) / float(len(seq_char_scores))
if seq_score > rec_score:
result_words.append(seq_word)
else:
result_words.append(word)
return result_polygons, result_words
def process_char_mask(self, char_masks, boxes, threshold=192):
texts, rec_scores = [], []
for index in range(char_masks.shape[0]):
box = list(boxes[index])
box = list(map(int, box))
text, rec_score, _, _ = getstr_grid(char_masks[index,:,:,:].copy(), box, threshold=threshold)
texts.append(text)
rec_scores.append(rec_score)
return texts, rec_scores
def mask2polygon(self, mask, box, im_size, threshold=0.5, output_polygon=True):
# mask 32*128
image_width, image_height = im_size[1], im_size[0]
box_h = box[3] - box[1]
box_w = box[2] - box[0]
cls_polys = (mask*255).astype(np.uint8)
poly_map = np.array(Image.fromarray(cls_polys).resize((box_w, box_h)))
poly_map = poly_map.astype(np.float32) / 255
poly_map=cv2.GaussianBlur(poly_map,(3,3),sigmaX=3)
ret, poly_map = cv2.threshold(poly_map,0.5,1,cv2.THRESH_BINARY)
if output_polygon:
SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
poly_map = cv2.erode(poly_map,SE1)
poly_map = cv2.dilate(poly_map,SE1);
poly_map = cv2.morphologyEx(poly_map,cv2.MORPH_CLOSE,SE1)
try:
_, contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
except:
contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if len(contours)==0:
print(contours)
print(len(contours))
return None
max_area=0
max_cnt = contours[0]
for cnt in contours:
area=cv2.contourArea(cnt)
if area > max_area:
max_area = area
max_cnt = cnt
perimeter = cv2.arcLength(max_cnt,True)
epsilon = 0.01*cv2.arcLength(max_cnt,True)
approx = cv2.approxPolyDP(max_cnt,epsilon,True)
pts = approx.reshape((-1,2))
pts[:,0] = pts[:,0] + box[0]
pts[:,1] = pts[:,1] + box[1]
polygon = list(pts.reshape((-1,)))
polygon = list(map(int, polygon))
if len(polygon)<6:
return None
else:
SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
poly_map = cv2.erode(poly_map,SE1)
poly_map = cv2.dilate(poly_map,SE1);
poly_map = cv2.morphologyEx(poly_map,cv2.MORPH_CLOSE,SE1)
idy,idx=np.where(poly_map == 1)
xy=np.vstack((idx,idy))
xy=np.transpose(xy)
hull = cv2.convexHull(xy, clockwise=True)
#reverse order of points.
if hull is None:
return None
hull=hull[::-1]
#find minimum area bounding box.
rect = cv2.minAreaRect(hull)
corners = cv2.boxPoints(rect)
corners = np.array(corners, dtype="int")
pts = get_tight_rect(corners, box[0], box[1], image_height, image_width, 1)
polygon = [x * 1.0 for x in pts]
polygon = list(map(int, polygon))
return polygon
def visualization(self, image, polygons, words):
for polygon, word in zip(polygons, words):
pts = np.array(polygon, np.int32)
pts = pts.reshape((-1,1,2))
xmin = min(pts[:,0,0])
ymin = min(pts[:,0,1])
cv2.polylines(image,[pts],True,(0,0,255))
cv2.putText(image, word, (xmin, ymin), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2)
def main(args):
    """Run the text-spotting demo on one image and save the visualization."""
    # Load the experiment settings from the YAML file given on the command line.
    cfg.merge_from_file(args.config_file)
    demo = TextDemo(
        cfg,
        min_image_size=800,
        confidence_threshold=0.7,
        output_polygon=True,
    )
    # Read the image, spot text, draw the results, and write the output file.
    image = cv2.imread(args.image_path)
    polygons, words = demo.run_on_opencv_image(image)
    demo.visualization(image, polygons, words)
    cv2.imwrite(args.visu_path, image)
if __name__ == "__main__":
    # Command-line entry point for the demo.
    parser = argparse.ArgumentParser(description='parameters for demo')
    # Model/config YAML describing the trained spotter.
    parser.add_argument("--config-file", type=str, default='configs/mixtrain/seg_rec_poly_fuse_feature.yaml')
    # Input image and where to write the visualized result.
    parser.add_argument("--image_path", type=str, default='./demo_images/demo.jpg')
    parser.add_argument("--visu_path", type=str, default='./demo_images/demo_results.jpg')
    args = parser.parse_args()
    main(args)
# ==== tools/train_net.py ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logging import setup_logger, Logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
# See if we can use apex.DistributedDataParallel instead of the torch default,
# and enable mixed-precision via apex.amp
try:
from apex import amp
except ImportError:
raise ImportError('Use APEX for multi-precision via apex.amp')
def train(cfg, local_rank, distributed):
    """Build model, optimizer and data loader, restore a checkpoint, and train.

    Args:
        cfg: frozen yacs config node describing the experiment.
        local_rank (int): local GPU/process index, used for DDP placement.
        distributed (bool): whether to wrap the model in DistributedDataParallel.

    Returns:
        The (possibly DDP-wrapped) trained model.
    """
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    # Initialize mixed-precision training
    use_mixed_precision = cfg.DTYPE == "float16"
    amp_opt_level = 'O1' if use_mixed_precision else 'O0'
    # Apex requires amp.initialize to run before DDP wrapping (per apex docs).
    model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            # this should be removed if we update BatchNorm stats
            broadcast_buffers=False,
            # find_unused_parameters=True
        )
    # Mutable training state shared with the checkpointer (e.g. resume iteration).
    arguments = {}
    arguments["iteration"] = 0
    output_dir = cfg.OUTPUT_DIR
    # Only rank 0 writes checkpoints to disk.
    save_to_disk = get_rank() == 0
    checkpointer = DetectronCheckpointer(
        cfg, model, optimizer, scheduler, output_dir, save_to_disk
    )
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.SOLVER.RESUME)
    if cfg.SOLVER.RESUME:
        # Restore the saved iteration counter (and any other persisted state).
        arguments.update(extra_checkpoint_data)
    data_loader = make_data_loader(
        cfg,
        is_train=True,
        is_distributed=distributed,
        start_iter=arguments["iteration"],
    )
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    tb_logger = Logger(cfg.OUTPUT_DIR, local_rank)
    do_train(
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
        tb_logger,
        cfg,
        local_rank,
    )
    return model
def main():
    """Parse CLI options, set up (optionally distributed) training, and run it."""
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file", type=str)
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--skip-test", dest="skip_test", help="Do not test the final model", action="store_true")
    parser.add_argument("opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; absent means one process.
    world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = world_size > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )

    # Layer config file values and command-line overrides onto the defaults.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    local_rank = get_rank()
    logger = setup_logger("maskrcnn_benchmark", output_dir, local_rank)
    if local_rank == 0:
        # Only rank 0 logs environment and configuration details.
        logger.info("Using {} GPUs".format(world_size))
        logger.info(args)
        logger.info("Collecting env info (might take some time)")
        logger.info("\n" + collect_env_info())
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, "r") as cf:
            config_str = "\n" + cf.read()
        logger.info(config_str)
        logger.info("Running with config:\n{}".format(cfg))

    model = train(cfg, args.local_rank, args.distributed)
if __name__ == "__main__":
    # Script entry point (typically launched via torch.distributed.launch).
    main()
| 4,818 | 30.292208 | 89 | py |
# ==== maskrcnn_benchmark/solver/lr_scheduler.py ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step (or polynomial) learning-rate schedule with a warmup phase.

    For the first ``warmup_iters`` iterations the base learning rate is damped
    by a warmup factor: "constant" keeps the factor fixed, "linear" ramps it
    from ``warmup_factor`` up to 1. Afterwards the rate either decays by
    ``gamma`` at each milestone, or -- when ``pow_schedule_mode`` is True --
    follows a polynomial decay ``(1 - iter / max_iter) ** lr_pow``.
    """

    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
        pow_schedule_mode = False,
        max_iter = 300000,
        lr_pow = 0.9
    ):
        if not list(milestones) == sorted(milestones):
            # Bug fix: the message was previously passed to ValueError as a
            # (format-string, value) pair without ever calling .format(),
            # so the milestones were never interpolated into the message.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            # Bug fix: the two string literals used to concatenate without a
            # separator, producing "...acceptedgot ...".
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        self.pow_schedule_mode = pow_schedule_mode
        self.max_iter = max_iter
        self.lr_pow = lr_pow
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the learning rate for each parameter group at the current step."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Linearly interpolate the factor from warmup_factor to 1.
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        if self.pow_schedule_mode:
            # Polynomial ("poly") decay over max_iter iterations.
            scale_running_lr = ((1. - float(self.last_epoch) / self.max_iter) ** self.lr_pow)
            return [
                base_lr * warmup_factor * scale_running_lr
                for base_lr in self.base_lrs
            ]
        else:
            # Step decay: multiply by gamma once per milestone already passed.
            return [
                base_lr
                * warmup_factor
                * self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs
            ]
| 2,292 | 33.742424 | 93 | py |
# ==== maskrcnn_benchmark/solver/__init__.py ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_optimizer
from .build import make_lr_scheduler
from .lr_scheduler import WarmupMultiStepLR
| 187 | 36.6 | 71 | py |
# ==== maskrcnn_benchmark/solver/build.py ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
    """Build the optimizer described by ``cfg.SOLVER`` for ``model``.

    Each trainable parameter gets its own param group so that bias parameters
    can use a scaled learning rate (BASE_LR * BIAS_LR_FACTOR) and their own
    weight decay (WEIGHT_DECAY_BIAS).

    Args:
        cfg: config node; reads cfg.SOLVER.{BASE_LR, BIAS_LR_FACTOR,
            WEIGHT_DECAY, WEIGHT_DECAY_BIAS, MOMENTUM, USE_ADAM}.
        model: torch.nn.Module whose parameters are optimized.

    Returns:
        torch.optim.Adam if cfg.SOLVER.USE_ADAM else torch.optim.SGD.
    """
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    # Bug fix: the default learning rate used to be the loop variable `lr`
    # leaking out of the loop above, so it was undefined when the model had no
    # trainable parameters and silently depended on whether the *last* named
    # parameter happened to be a bias. Use BASE_LR explicitly instead; for SGD
    # this is behavior-preserving since every param group carries its own "lr".
    if cfg.SOLVER.USE_ADAM:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.SOLVER.BASE_LR)
    else:
        optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
    return optimizer
def make_lr_scheduler(cfg, optimizer):
    """Create the warmup multi-step LR scheduler configured by ``cfg.SOLVER``."""
    solver = cfg.SOLVER
    return WarmupMultiStepLR(
        optimizer,
        solver.STEPS,
        solver.GAMMA,
        warmup_factor=solver.WARMUP_FACTOR,
        warmup_iters=solver.WARMUP_ITERS,
        warmup_method=solver.WARMUP_METHOD,
        pow_schedule_mode=solver.POW_SCHEDULE,
        max_iter=solver.MAX_ITER,
    )
| 1,176 | 29.973684 | 79 | py |
# ==== maskrcnn_benchmark/config/defaults.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
# Root configuration node; every option below is registered on it.
_C = CN()

_C.MODEL = CN()
_C.MODEL.RPN_ONLY = False
_C.MODEL.MASK_ON = False
_C.MODEL.SEG_ON = False
_C.MODEL.CHAR_MASK_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
_C.MODEL.TRAIN_DETECTION_ONLY = False
_C.MODEL.RESNET34 = False
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""

# Options for the sequence-recognition head.
_C.SEQUENCE = CN()
_C.SEQUENCE.SEQ_ON = False
# Size of the character vocabulary (presumably 36 alphanumerics plus
# special tokens -- confirm against the recognition head).
_C.SEQUENCE.NUM_CHAR = 38
_C.SEQUENCE.BOS_TOKEN = 0
_C.SEQUENCE.MAX_LENGTH = 32
_C.SEQUENCE.TEACHER_FORCE_RATIO = 1.0
_C.SEQUENCE.TWO_CONV = False
_C.SEQUENCE.MEAN_SCORE = False
# Height/width the recognition feature map is resized to.
_C.SEQUENCE.RESIZE_HEIGHT = 16
_C.SEQUENCE.RESIZE_WIDTH = 64
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = (800,) # (800,)
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [1.0, 1.0, 1.0]
# Convert image to BGR format (for Caffe2 models), in range 0-255
_C.INPUT.TO_BGR255 = True
# NOTE(review): presumably forces an exact resize rather than aspect-preserving
# scaling -- confirm against the resize transform.
_C.INPUT.STRICT_RESIZE = False
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.TRAIN = ()
# List of the dataset names for testing, as present in paths_catalog.py
_C.DATASETS.TEST = ()
# Sampling ratios when mixing several training datasets -- TODO confirm usage.
_C.DATASETS.RATIOS = []
# Data-augmentation switches used by the training datasets.
_C.DATASETS.AUG = False
_C.DATASETS.RANDOM_CROP_PROB = 0.0
_C.DATASETS.IGNORE_DIFFICULT = False
_C.DATASETS.FIX_CROP = False
_C.DATASETS.CROP_SIZE = (512, 512)
# Maximum rotation angle (degrees) for rotation augmentation.
_C.DATASETS.MAX_ROTATE_THETA = 30
_C.DATASETS.FIX_ROTATE = False
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If > 0, this enforces that each collated batch should have a size divisible
# by SIZE_DIVISIBILITY
_C.DATALOADER.SIZE_DIVISIBILITY = 0
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
_C.MODEL.BACKBONE.CONV_BODY = "R-50-C4"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2
_C.MODEL.BACKBONE.OUT_CHANNELS = 256 * 4
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Residual transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256 * 4
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# Deformable-convolution options, one flag per residual stage.
_C.MODEL.RESNETS.STAGE_WITH_DCN = (False, False, False, False)
_C.MODEL.RESNETS.WITH_MODULATED_DCN = False
_C.MODEL.RESNETS.DEFORMABLE_GROUPS = 1
# Number of residual blocks per stage (default is the ResNet-50 layout).
_C.MODEL.RESNETS.LAYERS = (3, 4, 6, 3)
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
_C.MODEL.FPN.USE_GN = False
_C.MODEL.FPN.USE_RELU = False
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.USE_FPN = False
# Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
_C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512)
# Stride of the feature map that RPN is attached.
# For FPN, number of strides should match number of scales
_C.MODEL.RPN.ANCHOR_STRIDE = (16,)
# RPN anchor aspect ratios
_C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.STRADDLE_THRESH = 0
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example)
_C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example)
_C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
_C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (a the scale used during training or inference)
_C.MODEL.RPN.MIN_SIZE = 0
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
_C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
# Options for the segmentation-based proposal generator (SPN).
_C.MODEL.SEG = CN()
_C.MODEL.SEG.USE_FPN = False
_C.MODEL.SEG.USE_FUSE_FEATURE = False
# Total number of SEG examples per image
_C.MODEL.SEG.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per SEG minibatch
_C.MODEL.SEG.POSITIVE_FRACTION = 0.5
# NMS threshold used on SEG proposals
_C.MODEL.SEG.BINARY_THRESH = 0.5
_C.MODEL.SEG.USE_MULTIPLE_THRESH = False
_C.MODEL.SEG.MULTIPLE_THRESH = (0.2, 0.3, 0.5, 0.7)
_C.MODEL.SEG.BOX_THRESH = 0.7
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (a the scale used during training or inference)
_C.MODEL.SEG.MIN_SIZE = 0
# Shrink ratio applied to text regions when building segmentation targets.
_C.MODEL.SEG.SHRINK_RATIO = 0.5
# Number of top scoring RPN proposals to keep after combining proposals from
# all FPN levels
_C.MODEL.SEG.TOP_N_TRAIN = 1000
_C.MODEL.SEG.TOP_N_TEST = 1000
_C.MODEL.SEG.AUG_PROPOSALS = False
_C.MODEL.SEG.IGNORE_DIFFICULT = True
# Expansion ratios for growing shrunken regions back into proposals.
_C.MODEL.SEG.EXPAND_RATIO = 1.6
_C.MODEL.SEG.BOX_EXPAND_RATIO = 1.5
_C.MODEL.SEG.USE_SEG_POLY = False
_C.MODEL.SEG.USE_PPM = False
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.USE_FPN = False
# Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5
# Overlap threshold for an RoI to be considered background
# (class = 0 if overlap in [0, BG_IOU_THRESHOLD))
_C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
#   TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS
# E.g., a common configuration is: 512 * 2 * 8 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
# _C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05
_C.MODEL.ROI_HEADS.SCORE_THRESH = 0.0
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS = 0.5
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
_C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100

# Box head: feature extraction and classification/regression over RoIs.
_C.MODEL.ROI_BOX_HEAD = CN()
_C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor"
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81
# Hidden layer dimension when using an MLP for the RoI box head
_C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_BOX_HEAD.USE_REGRESSION = True
_C.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX = True
# Hard/soft masking of RoI features before the box head.
_C.MODEL.ROI_BOX_HEAD.USE_MASKED_FEATURE = False
_C.MODEL.ROI_BOX_HEAD.SOFT_MASKED_FEATURE_RATIO = 0.
_C.MODEL.ROI_BOX_HEAD.MIX_OPTION = ""

# Mask head: instance/character mask prediction over RoIs.
_C.MODEL.ROI_MASK_HEAD = CN()
_C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor"
_C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
# Rectangular pooling (H x W) used for text instances.
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_H = 32
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION_W = 128
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,)
_C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024
_C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256)
_C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.RESOLUTION_H = 32
_C.MODEL.ROI_MASK_HEAD.RESOLUTION_W = 128
_C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True
# Character classes for the character-mask branch (matches SEQUENCE.NUM_CHAR).
_C.MODEL.ROI_MASK_HEAD.CHAR_NUM_CLASSES = 38
_C.MODEL.ROI_MASK_HEAD.USE_WEIGHTED_CHAR_MASK = False
_C.MODEL.ROI_MASK_HEAD.MASK_BATCH_SIZE_PER_IM = 64
_C.MODEL.ROI_MASK_HEAD.USE_MASKED_FEATURE = False
_C.MODEL.ROI_MASK_HEAD.SOFT_MASKED_FEATURE_RATIO = 0.
_C.MODEL.ROI_MASK_HEAD.MIX_OPTION = ""
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
# Biases get BASE_LR * BIAS_LR_FACTOR (see solver/build.py).
_C.SOLVER.BIAS_LR_FACTOR = 2
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
# Step-decay parameters consumed by WarmupMultiStepLR.
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 5000
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 16
_C.SOLVER.RESUME = True
_C.SOLVER.USE_ADAM = False
# If True, use a polynomial ("poly") LR schedule instead of step decay.
_C.SOLVER.POW_SCHEDULE = False
_C.SOLVER.DISPLAY_FREQ = 20
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
_C.TEST.EXPECTED_RESULTS = []
_C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST.IMS_PER_BATCH = 8
_C.TEST.VIS = False
# from 0 to 255
_C.TEST.CHAR_THRESH = 128
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "."
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
# ---------------------------------------------------------------------------- #
# Precision options
# ---------------------------------------------------------------------------- #
# Precision of input, allowable: (float32, float16)
_C.DTYPE = "float32"
# Enable verbosity in apex.amp
_C.AMP_VERBOSE = False
# ==== maskrcnn_benchmark/config/__init__.py ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .defaults import _C as cfg
| 104 | 34 | 71 | py |
# ==== maskrcnn_benchmark/config/paths_catalog.py ====
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
DATA_DIR = "datasets"
# DATA_DIR = "/share/mhliao/MaskTextSpotterV3/datasets/"
DATASETS = {
"coco_2014_train": (
"coco/train2014",
"coco/annotations/instances_train2014.json",
),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": (
"coco/val2014",
"coco/annotations/instances_minival2014.json",
),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"icdar_2013_train": ("icdar2013/train_images", "icdar2013/train_gts"),
"icdar_2013_test": ("icdar2013/test_images", "icdar2013/test_gts"),
"rotated_ic13_test_0": ("icdar2013/rotated_test_images_0", "icdar2013/rotated_test_gts_0"),
"rotated_ic13_test_15": ("icdar2013/rotated_test_images_15", "icdar2013/rotated_test_gts_15"),
"rotated_ic13_test_30": ("icdar2013/rotated_test_images_30", "icdar2013/rotated_test_gts_30"),
"rotated_ic13_test_45": ("icdar2013/rotated_test_images_45", "icdar2013/rotated_test_gts_45"),
"rotated_ic13_test_60": ("icdar2013/rotated_test_images_60", "icdar2013/rotated_test_gts_60"),
"rotated_ic13_test_75": ("icdar2013/rotated_test_images_75", "icdar2013/rotated_test_gts_75"),
"rotated_ic13_test_85": ("icdar2013/rotated_test_images_85", "icdar2013/rotated_test_gts_85"),
"rotated_ic13_test_90": ("icdar2013/rotated_test_images_90", "icdar2013/rotated_test_gts_90"),
"rotated_ic13_test_-15": ("icdar2013/rotated_test_images_-15", "icdar2013/rotated_test_gts_-15"),
"rotated_ic13_test_-30": ("icdar2013/rotated_test_images_-30", "icdar2013/rotated_test_gts_-30"),
"rotated_ic13_test_-45": ("icdar2013/rotated_test_images_-45", "icdar2013/rotated_test_gts_-45"),
"rotated_ic13_test_-60": ("icdar2013/rotated_test_images_-60", "icdar2013/rotated_test_gts_-60"),
"rotated_ic13_test_-75": ("icdar2013/rotated_test_images_-75", "icdar2013/rotated_test_gts_-75"),
"rotated_ic13_test_-90": ("icdar2013/rotated_test_images_-90", "icdar2013/rotated_test_gts_-90"),
"icdar_2015_train": ("icdar2015/train_images", "icdar2015/train_gts"),
"icdar_2015_test": (
"icdar2015/test_images",
# "icdar2015/test_gts",
),
"synthtext_train": ("synthtext/train_images", "synthtext/train_gts"),
"synthtext_test": ("synthtext/test_images", "synthtext/test_gts"),
"total_text_train": ("total_text/train_images", "total_text/train_gts"),
"td500_train": ("TD_TR/TD500/train_images", "TD500/train_gts"),
"td500_test": ("TD_TR/TD500/test_images", ),
"tr400_train": ("TD_TR/TR400/train_images", "TR400/train_gts"),
"total_text_test": (
"total_text/test_images",
# "total_text/test_gts",
),
"scut-eng-char_train": (
"scut-eng-char/train_images",
"scut-eng-char/train_gts",
),
}
@staticmethod
def get(name):
if "coco" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
root=os.path.join(data_dir, attrs[0]),
ann_file=os.path.join(data_dir, attrs[1]),
)
return dict(factory="COCODataset", args=args)
elif "icdar_2013" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
use_charann=True,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=os.path.join(data_dir, attrs[1]),
# imgs_dir='/tmp/icdar2013/icdar2013/train_images',
# gts_dir='/tmp/icdar2013/icdar2013/train_gts',
)
return dict(args=args, factory="IcdarDataset")
elif "rotated_ic13" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
use_charann=True,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=os.path.join(data_dir, attrs[1]),
)
return dict(args=args, factory="IcdarDataset")
elif "icdar_2015" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
if len(attrs) > 1:
gts_dir = os.path.join(data_dir, attrs[1])
else:
gts_dir = None
args = dict(
use_charann=False,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=gts_dir,
# imgs_dir='/tmp/icdar2015/icdar2015/train_images/',
# gts_dir='/tmp/icdar2015/icdar2015/train_gts/',
)
return dict(args=args, factory="IcdarDataset")
elif "synthtext" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
use_charann=True,
list_file_path=os.path.join(data_dir, "synthtext/train_list.txt"),
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=os.path.join(data_dir, attrs[1]),
# imgs_dir='/tmp/synth/SynthText/',
# gts_dir='/tmp/synth_gt/SynthText_GT_E2E/',
)
return dict(args=args, factory="SynthtextDataset")
elif "total_text" in name:
data_dir = DatasetCatalog.DATA_DIR
# data_dir = '/tmp/total_text/'
attrs = DatasetCatalog.DATASETS[name]
if len(attrs) > 1:
gts_dir = os.path.join(data_dir, attrs[1])
else:
gts_dir = None
args = dict(
use_charann=False,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=gts_dir,
# imgs_dir='/tmp/total_text/total_text/train_images/',
# gts_dir='/tmp/total_text/total_text/train_gts/',
)
return dict(args=args, factory="TotaltextDataset")
elif "scut-eng-char" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(
use_charann=True,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=os.path.join(data_dir, attrs[1]),
# imgs_dir='/tmp/scut-eng-char/scut-eng-char/train_images/',
# gts_dir='/tmp/scut-eng-char/scut-eng-char/train_gts/',
)
return dict(args=args, factory="ScutDataset")
elif "td500" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
if len(attrs) > 1:
gts_dir = os.path.join(data_dir, attrs[1])
else:
gts_dir = None
args = dict(
use_charann=False,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=gts_dir,
)
return dict(args=args, factory="TotaltextDataset")
elif "tr400" in name:
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
if len(attrs) > 1:
gts_dir = os.path.join(data_dir, attrs[1])
else:
gts_dir = None
args = dict(
use_charann=False,
imgs_dir=os.path.join(data_dir, attrs[0]),
gts_dir=gts_dir,
)
return dict(args=args, factory="TotaltextDataset")
raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
    """Maps symbolic pretrained-model identifiers to download URLs."""

    S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
    C2_IMAGENET_MODELS = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
        "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
        "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
        "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
        "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
    }

    # NOTE(review): this suffix still contains unfilled "{}" placeholders;
    # upstream Detectron formats them with a dataset tag — confirm before
    # relying on these URLs for download.
    C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
    C2_DETECTRON_MODELS = {
        "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
        "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
        "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
        "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
        "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
        "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
        "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
        "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
        "37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
        # keypoints
        "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
    }

    @staticmethod
    def get(name):
        """Dispatch on the identifier prefix; raise for unknown catalogs."""
        handlers = (
            ("Caffe2Detectron/COCO", ModelCatalog.get_c2_detectron_12_2017_baselines),
            ("ImageNetPretrained", ModelCatalog.get_c2_imagenet_pretrained),
        )
        for prefix, handler in handlers:
            if name.startswith(prefix):
                return handler(name)
        raise RuntimeError("model not present in the catalog {}".format(name))

    @staticmethod
    def get_c2_imagenet_pretrained(name):
        """Return the download URL for an ImageNet-pretrained backbone."""
        key = name[len("ImageNetPretrained/"):]
        entry = ModelCatalog.C2_IMAGENET_MODELS[key]
        # torchvision checkpoints are stored as full URLs already
        if 'resnet34' in entry or 'resnet18' in entry:
            return entry
        return "/".join([ModelCatalog.S3_C2_DETECTRON_URL, entry])

    @staticmethod
    def get_c2_detectron_12_2017_baselines(name):
        """Return the download URL for a Detectron 12_2017 baseline model.

        Identifiers look like Caffe2Detectron/COCO/<model_id>/<model_name>;
        files live at
        prefix/<model_id>/12_2017_baselines/<model_name>.yaml.<signature>/suffix
        """
        key = name[len("Caffe2Detectron/COCO/"):]
        model_id, model_name = key.split("/")
        signature = ModelCatalog.C2_DETECTRON_MODELS[key]
        unique_name = "{}.yaml.{}".format(model_name, signature)
        return "/".join([
            ModelCatalog.S3_C2_DETECTRON_URL,
            model_id,
            "12_2017_baselines",
            unique_name,
            ModelCatalog.C2_DETECTRON_SUFFIX,
        ])
| 11,140 | 45.810924 | 121 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/nms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Expose the compiled non-maximum-suppression kernel from the C/CUDA
# extension, wrapped so that apex AMP always calls it with fp32 inputs.
# from ._utils import _C
from maskrcnn_benchmark import _C
# nms = _C.nms
from apex import amp
# Only valid with fp32 inputs - give AMP the hint
nms = amp.float_function(_C.nms)
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
| 323 | 26 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/batch_norm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
class FrozenBatchNorm2d(nn.Module):
    """BatchNorm2d with both the running statistics and the affine
    parameters frozen (stored as buffers, never updated by training).

    forward() applies the usual affine normalization
        y = (x - running_mean) / sqrt(running_var) * weight + bias
    folded into a single per-channel scale and shift.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # Buffers (not Parameters): excluded from the optimizer, but kept
        # in the state dict and moved along with .to()/.cuda().
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def forward(self, x):
        # Keep buffer dtype in sync with a half-precision input.
        if x.dtype == torch.float16:
            for buf_name in ("weight", "bias", "running_mean", "running_var"):
                setattr(self, buf_name, getattr(self, buf_name).half())
        scale = self.weight * self.running_var.rsqrt()
        shift = self.bias - self.running_mean * scale
        return x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)
| 1,093 | 34.290323 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/roi_pool.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
from apex import amp
class _ROIPool(Function):
    """Autograd wrapper around the compiled RoI max-pooling kernels."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale):
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        # argmax records, per output cell, which input element won the max;
        # it is needed to route gradients in the backward pass.
        output, argmax = _C.roi_pool_forward(
            input, roi, spatial_scale, output_size[0], output_size[1]
        )
        ctx.save_for_backward(input, roi, argmax)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, rois, argmax = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_pool_backward(
            grad_output,
            input,
            rois,
            argmax,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
        )
        # One gradient per forward argument; the non-tensor args get None.
        return grad_input, None, None, None


# Functional entry point used by the ROIPool module below.
roi_pool = _ROIPool.apply
class ROIPool(nn.Module):
    """Module wrapper over the functional roi_pool (max pooling per RoI)."""

    def __init__(self, output_size, spatial_scale):
        super(ROIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    @amp.float_function
    def forward(self, input, rois):
        # The underlying kernel only supports fp32, hence the AMP decorator.
        return roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return "{}(output_size={}, spatial_scale={})".format(
            self.__class__.__name__, self.output_size, self.spatial_scale
        )
| 1,899 | 28.230769 | 74 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/roi_align.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
from apex import amp
class _ROIAlign(Function):
    """Autograd wrapper around the compiled RoIAlign (bilinear) kernels."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        # Only the rois are needed for backward: bilinear sampling weights
        # can be recomputed from them, so the input itself is not saved.
        ctx.save_for_backward(roi)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        output = _C.roi_align_forward(
            input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        rois, = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_align_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
            sampling_ratio,
        )
        # One gradient per forward argument; the non-tensor args get None.
        return grad_input, None, None, None, None


# Functional entry point used by the ROIAlign module below.
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
    """Module wrapper over the functional roi_align (bilinear RoI pooling)."""

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    @amp.float_function
    def forward(self, input, rois):
        # fp32-only kernel; the AMP decorator casts inputs accordingly.
        return roi_align(
            input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
        )

    def __repr__(self):
        return "{}(output_size={}, spatial_scale={}, sampling_ratio={})".format(
            self.__class__.__name__,
            self.output_size,
            self.spatial_scale,
            self.sampling_ratio,
        )
| 2,154 | 29.785714 | 85 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/smooth_l1_loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# TODO maybe push this to nn?
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """Smooth-L1 (Huber-style) loss with a configurable transition point.

    Quadratic for |input - target| < beta and linear beyond it, matching
    torch.nn.functional.smooth_l1_loss except for the extra `beta` knob.

    Args:
        input, target: tensors of the same shape.
        beta: width of the quadratic region.
        size_average: return the mean over elements if True, else the sum.
    """
    diff = torch.abs(input - target)
    loss = torch.where(diff < beta, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)
    return loss.mean() if size_average else loss.sum()
| 481 | 27.352941 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load as load_ext
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError("The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
    """JIT-compile and load the C/CUDA sources under maskrcnn_benchmark/csrc.

    CUDA kernels are only included (and -DWITH_CUDA defined) when both a
    CUDA runtime and the toolkit (CUDA_HOME) are available.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    this_dir = os.path.dirname(this_dir)
    this_dir = os.path.join(this_dir, "csrc")

    main_file = glob.glob(os.path.join(this_dir, "*.cpp"))
    source_cpu = glob.glob(os.path.join(this_dir, "cpu", "*.cpp"))
    source_cuda = glob.glob(os.path.join(this_dir, "cuda", "*.cu"))

    source = main_file + source_cpu
    extra_cflags = []
    if torch.cuda.is_available() and CUDA_HOME is not None:
        source.extend(source_cuda)
        extra_cflags = ["-DWITH_CUDA"]
    # NOTE(review): glob already returns paths under this_dir, and
    # os.path.join with an absolute second argument is a no-op — confirm
    # this line is intentional.
    source = [os.path.join(this_dir, s) for s in source]

    extra_include_paths = [this_dir]
    return load_ext(
        "torchvision",
        source,
        extra_cflags=extra_cflags,
        extra_include_paths=extra_include_paths,
    )


# Compiled extension module, built once at import time.
_C = _load_C_extensions()
| 1,165 | 28.15 | 80 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
helper class that supports empty tensors on some nn functions.
Ideally, add support directly in PyTorch to empty tensors in
those functions.
This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import math
import torch
from torch import nn
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
    """torch.nn.Conv2d that also accepts zero-element batches, returning a
    correctly-shaped empty output instead of erroring."""

    def forward(self, x):
        if x.numel() == 0:
            # Standard conv arithmetic:
            # out = (in + 2*pad - dilated_kernel) // stride + 1
            spatial = [
                (size + 2 * pad - (dil * (k - 1) + 1)) // step + 1
                for size, pad, dil, k, step in zip(
                    x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
                )
            ]
            return _NewEmptyTensorOp.apply(
                x, [x.shape[0], self.weight.shape[0]] + spatial
            )
        return super(Conv2d, self).forward(x)
class ConvTranspose2d(torch.nn.ConvTranspose2d):
    """torch.nn.ConvTranspose2d that also accepts zero-element batches,
    returning a correctly-shaped empty output instead of erroring."""

    def forward(self, x):
        if x.numel() > 0:
            return super(ConvTranspose2d, self).forward(x)
        # get output shape for the empty-batch case; transposed-conv
        # arithmetic:
        # out = (in - 1)*stride - 2*pad + dilated_kernel + output_padding
        output_shape = [
            (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
            for i, p, di, k, d, op in zip(
                x.shape[-2:],
                self.padding,
                self.dilation,
                self.kernel_size,
                self.stride,
                self.output_padding,
            )
        ]
        # BUGFIX: the channel count used to be read from self.bias, which is
        # None when the layer is built with bias=False; out_channels is the
        # authoritative value either way.
        output_shape = [x.shape[0], self.out_channels] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
def interpolate(
    input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
    """Drop-in replacement for F.interpolate that tolerates empty batches.

    Non-empty inputs are delegated to torch.nn.functional.interpolate;
    empty ones get an empty tensor of the shape interpolation would
    produce. Exactly one of `size` / `scale_factor` must be given.
    """
    if input.numel() == 0:

        def _check_size_scale_factor(dim):
            # Validation identical to upstream F.interpolate.
            if size is None and scale_factor is None:
                raise ValueError("either size or scale_factor should be defined")
            if size is not None and scale_factor is not None:
                raise ValueError("only one of size or scale_factor should be defined")
            if (
                scale_factor is not None
                and isinstance(scale_factor, tuple)
                and len(scale_factor) != dim
            ):
                raise ValueError(
                    "scale_factor shape must match input shape. "
                    "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
                )

        def _output_size(dim):
            _check_size_scale_factor(dim)
            if size is not None:
                return size
            scale_factors = _ntuple(dim)(scale_factor)
            # math.floor might return float in py2.7
            return [
                int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)
            ]

        output_shape = input.shape[:-2] + tuple(_output_size(2))
        return _NewEmptyTensorOp.apply(input, output_shape)

    return torch.nn.functional.interpolate(
        input, size, scale_factor, mode, align_corners
    )
class DFConv2d(nn.Module):
    """Deformable convolutional layer.

    A plain Conv2d predicts per-position sampling offsets (and, for
    modulated DCN, per-position masks); those drive a
    (Modulated)DeformConv. Zero-element inputs short-circuit to an empty,
    correctly-shaped output.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        with_modulated_dcn=True,
        kernel_size=3,
        stride=1,
        groups=1,
        dilation=1,
        deformable_groups=1,
        bias=False
    ):
        super(DFConv2d, self).__init__()
        if isinstance(kernel_size, (list, tuple)):
            assert len(kernel_size) == 2
            offset_base_channels = kernel_size[0] * kernel_size[1]
        else:
            offset_base_channels = kernel_size * kernel_size
        if with_modulated_dcn:
            from maskrcnn_benchmark.layers import ModulatedDeformConv
            # (dy, dx) offsets plus one mask value per sampling location.
            offset_channels = offset_base_channels * 3  # default: 27
            conv_block = ModulatedDeformConv
        else:
            from maskrcnn_benchmark.layers import DeformConv
            offset_channels = offset_base_channels * 2  # default: 18
            conv_block = DeformConv
        self.offset = Conv2d(
            in_channels,
            deformable_groups * offset_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=dilation,
            groups=1,
            dilation=dilation
        )
        # Zero-init so the layer starts out as a regular convolution.
        for l in [self.offset, ]:
            nn.init.kaiming_uniform_(l.weight, a=1)
            torch.nn.init.constant_(l.bias, 0.)
        self.conv = conv_block(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            groups=groups,
            deformable_groups=deformable_groups,
            bias=bias
        )
        self.with_modulated_dcn = with_modulated_dcn
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = dilation
        self.dilation = dilation
        # BUGFIX: the offset/mask split used to be hard-coded as [:, :18]
        # and [:, -9:], which is only correct for kernel_size=3 and
        # deformable_groups=1; derive it from the configuration instead.
        self.offset_split = offset_base_channels * deformable_groups * 2

    def forward(self, x):
        if x.numel() > 0:
            if not self.with_modulated_dcn:
                offset = self.offset(x)
                return self.conv(x, offset)
            offset_mask = self.offset(x)
            offset = offset_mask[:, :self.offset_split, :, :]
            mask = offset_mask[:, self.offset_split:, :, :].sigmoid()
            return self.conv(x, offset, mask)
        # get output shape for the empty-batch case.
        # BUGFIX: the hyper-parameters may be plain ints (the defaults), in
        # which case zip() over them would raise; normalize to pairs first.
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // d + 1
            for i, p, di, k, d in zip(
                x.shape[-2:],
                _ntuple(2)(self.padding),
                _ntuple(2)(self.dilation),
                _ntuple(2)(self.kernel_size),
                _ntuple(2)(self.stride)
            )
        ]
        output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
| 6,035 | 31.451613 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .batch_norm import FrozenBatchNorm2d
from .misc import Conv2d
from .misc import DFConv2d
from .misc import ConvTranspose2d
from .misc import interpolate
from .nms import nms
from .roi_align import ROIAlign
from .roi_align import roi_align
from .roi_pool import ROIPool
from .roi_pool import roi_pool
from .smooth_l1_loss import smooth_l1_loss
from .dcn.deform_conv_func import deform_conv, modulated_deform_conv
from .dcn.deform_conv_module import DeformConv, ModulatedDeformConv, ModulatedDeformConvPack
from .dcn.deform_pool_func import deform_roi_pooling
from .dcn.deform_pool_module import DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack
__all__ = [
"nms",
"roi_align",
"ROIAlign",
"roi_pool",
"ROIPool",
"smooth_l1_loss",
"Conv2d",
"DFConv2d",
"ConvTranspose2d",
"interpolate",
"BatchNorm2d",
"FrozenBatchNorm2d",
'deform_conv',
'modulated_deform_conv',
'DeformConv',
'ModulatedDeformConv',
'ModulatedDeformConvPack',
'deform_roi_pooling',
'DeformRoIPooling',
'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack',
]
# __all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", "FrozenBatchNorm2d"]
| 1,372 | 30.930233 | 150 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_conv_func.py | import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
class DeformConvFunction(Function):
    """Autograd wrapper for the (non-modulated) deformable convolution
    CUDA kernels exposed by the compiled _C extension."""

    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        weight,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        im2col_step=64
    ):
        if input is not None and input.dim() != 4:
            raise ValueError(
                "Expected 4D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight)

        output = input.new_empty(
            DeformConvFunction._output_size(input, weight, ctx.padding,
                                            ctx.dilation, ctx.stride))

        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones

        if not input.is_cuda:
            raise NotImplementedError
        else:
            # im2col is processed in batches of cur_im2col_step images to
            # bound the size of the intermediate column buffer.
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            _C.deform_conv_forward(
                input,
                weight,
                offset,
                output,
                ctx.bufs_[0],
                ctx.bufs_[1],
                weight.size(3),
                weight.size(2),
                ctx.stride[1],
                ctx.stride[0],
                ctx.padding[1],
                ctx.padding[0],
                ctx.dilation[1],
                ctx.dilation[0],
                ctx.groups,
                ctx.deformable_groups,
                cur_im2col_step
            )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors

        grad_input = grad_offset = grad_weight = None

        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'

            # Gradients w.r.t. input and offset are produced together by a
            # single kernel call.
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                _C.deform_conv_backward_input(
                    input,
                    offset,
                    grad_output,
                    grad_input,
                    grad_offset,
                    weight,
                    ctx.bufs_[0],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    cur_im2col_step
                )

            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                _C.deform_conv_backward_parameters(
                    input,
                    offset,
                    grad_output,
                    grad_weight,
                    ctx.bufs_[0],
                    ctx.bufs_[1],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    1,
                    cur_im2col_step
                )

        # None for the non-tensor forward args (stride ... im2col_step).
        return (grad_input, grad_offset, grad_weight, None, None, None, None, None)

    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        # Standard convolution output-shape arithmetic, one spatial dim at
        # a time: (in + 2*pad - dilated_kernel) // stride + 1
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                "convolution input is too small (output would be {})".format(
                    'x'.join(map(str, output_size))))
        return output_size
class ModulatedDeformConvFunction(Function):
    """Autograd wrapper for the modulated deformable convolution (DCNv2)
    CUDA kernels: each sampling location has both an offset and a mask."""

    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        mask,
        weight,
        bias=None,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1
    ):
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            bias = input.new_empty(1)  # fake tensor
        if not input.is_cuda:
            raise NotImplementedError
        if weight.requires_grad or mask.requires_grad or offset.requires_grad \
                or input.requires_grad:
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(
            ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        # Scratch buffers reused by forward and backward kernels.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        _C.modulated_deform_conv_forward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            output,
            ctx._bufs[1],
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        # A single kernel call fills all five gradients.
        _C.modulated_deform_conv_backward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            ctx._bufs[1],
            grad_input,
            grad_weight,
            grad_bias,
            grad_offset,
            grad_mask,
            grad_output,
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias
        )
        if not ctx.with_bias:
            grad_bias = None

        # None for the non-tensor forward args (stride ... deformable_groups).
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
                None, None, None, None, None)

    @staticmethod
    def _infer_shape(ctx, input, weight):
        # Conv output-shape arithmetic; stride/padding/dilation are scalars
        # here (applied identically to both spatial dims).
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
        height_out = (height + 2 * ctx.padding -
                      (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1
        width_out = (width + 2 * ctx.padding -
                     (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1
        return n, channels_out, height_out, width_out


# Functional entry points used by the module wrappers.
deform_conv = DeformConvFunction.apply
modulated_deform_conv = ModulatedDeformConvFunction.apply
| 8,309 | 30.596958 | 83 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_pool_func.py | import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
class DeformRoIPoolingFunction(Function):
    """Autograd wrapper for the deformable (position-sensitive) RoI
    pooling CUDA kernels."""

    @staticmethod
    def forward(
        ctx,
        data,
        rois,
        offset,
        spatial_scale,
        out_size,
        out_channels,
        no_trans,
        group_size=1,
        part_size=None,
        sample_per_part=4,
        trans_std=.0
    ):
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        # part_size defaults to the output resolution.
        ctx.part_size = out_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std

        assert 0.0 <= ctx.trans_std <= 1.0
        if not data.is_cuda:
            raise NotImplementedError

        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        # output_count tracks how many samples fell into each bin; it is
        # needed to normalize gradients in the backward pass.
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        _C.deform_psroi_pooling_forward(
            data,
            rois,
            offset,
            output,
            output_count,
            ctx.no_trans,
            ctx.spatial_scale,
            ctx.out_channels,
            ctx.group_size,
            ctx.out_size,
            ctx.part_size,
            ctx.sample_per_part,
            ctx.trans_std
        )

        if data.requires_grad or rois.requires_grad or offset.requires_grad:
            ctx.save_for_backward(data, rois, offset)
            ctx.output_count = output_count

        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError

        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        # RoI coordinates do not receive gradients.
        grad_rois = None
        grad_offset = torch.zeros_like(offset)

        _C.deform_psroi_pooling_backward(
            grad_output,
            data,
            rois,
            offset,
            output_count,
            grad_input,
            grad_offset,
            ctx.no_trans,
            ctx.spatial_scale,
            ctx.out_channels,
            ctx.group_size,
            ctx.out_size,
            ctx.part_size,
            ctx.sample_per_part,
            ctx.trans_std
        )
        # None for the non-tensor forward args (spatial_scale ... trans_std).
        return (grad_input, grad_rois, grad_offset, None, None, None, None, None, None, None, None)


# Functional entry point used by the module wrappers below.
deform_roi_pooling = DeformRoIPoolingFunction.apply
| 2,595 | 26.041667 | 99 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_pool_module.py | from torch import nn
from .deform_pool_func import deform_roi_pooling
class DeformRoIPooling(nn.Module):
    """Deformable RoI pooling: position-sensitive RoI pooling whose bin
    sampling locations can be perturbed by a caller-supplied offset map."""

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0):
        super(DeformRoIPooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.out_size = out_size
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        # part_size defaults to the output resolution.
        self.part_size = part_size if part_size is not None else out_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std

    def forward(self, data, rois, offset):
        if self.no_trans:
            # An empty tensor signals "no offsets" to the kernel.
            offset = data.new_empty(0)
        return deform_roi_pooling(
            data, rois, offset, self.spatial_scale, self.out_size,
            self.out_channels, self.no_trans, self.group_size, self.part_size,
            self.sample_per_part, self.trans_std)
class DeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that learns its own offsets: a first, offset-free
    pooling pass feeds a small FC head which predicts the per-bin offsets
    used by the second, final pooling pass."""

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_channels=1024):
        super(DeformRoIPoolingPack,
              self).__init__(spatial_scale, out_size, out_channels, no_trans,
                             group_size, part_size, sample_per_part, trans_std)

        self.deform_fc_channels = deform_fc_channels

        if not no_trans:
            # Predicts 2 offsets (dy, dx) per output bin.
            self.offset_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 2))
            # Zero-init the last layer so training starts from zero offsets.
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # First pass: plain pooling (no_trans=True) to get features for
            # the offset predictor.
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            # Second pass: pooling with the predicted offsets.
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """Like DeformRoIPoolingPack, but additionally predicts a per-bin
    sigmoid mask that modulates (scales) the pooled output."""

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(
            spatial_scale, out_size, out_channels, no_trans, group_size,
            part_size, sample_per_part, trans_std)

        self.deform_fc_channels = deform_fc_channels

        if not no_trans:
            # Predicts 2 offsets (dy, dx) per output bin.
            self.offset_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 2))
            # Zero-init the last layer so training starts from zero offsets.
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()
            # Predicts 1 mask value per output bin, squashed by a sigmoid;
            # zero-init puts the initial mask at 0.5 everywhere.
            self.mask_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 1),
                nn.Sigmoid())
            self.mask_fc[2].weight.data.zero_()
            self.mask_fc[2].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # First pass: plain pooling (no_trans=True) to get features for
            # the offset and mask predictors.
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            mask = self.mask_fc(x.view(n, -1))
            mask = mask.view(n, 1, self.out_size, self.out_size)
            # Second pass with predicted offsets, modulated by the mask.
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std) * mask
| 6,307 | 40.774834 | 79 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/__init__.py | #
# Copied From [mmdetection](https://github.com/open-mmlab/mmdetection/tree/master/mmdet/ops/dcn)
#
| 101 | 24.5 | 96 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_conv_module.py | import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from .deform_conv_func import deform_conv, modulated_deform_conv
class DeformConv(nn.Module):
    """Deformable convolution module (sampling offsets supplied by the
    caller in forward()).

    Mirrors nn.Conv2d's hyper-parameters; bias is not supported.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=False
    ):
        assert not bias
        super(DeformConv, self).__init__()
        self.with_bias = bias

        assert in_channels % groups == 0, \
            'in_channels {} cannot be divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} cannot be divisible by groups {}'.format(
                out_channels, groups)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)], the classic
        # Conv2d default.
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        bound = 1. / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)

    def forward(self, input, offset):
        return deform_conv(input, offset, self.weight, self.stride,
                           self.padding, self.dilation, self.groups,
                           self.deformable_groups)

    def __repr__(self):
        return (
            "{}(in_channels={}, out_channels={}, kernel_size={}, "
            "stride={}, dilation={}, padding={}, groups={}, "
            "deformable_groups={}, bias={})".format(
                self.__class__.__name__,
                self.in_channels,
                self.out_channels,
                self.kernel_size,
                self.stride,
                self.dilation,
                self.padding,
                self.groups,
                self.deformable_groups,
                self.with_bias,
            )
        )
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution (DCN v2) layer.

    Like ``DeformConv`` but each sampled value is additionally scaled by a
    per-location ``mask``; both ``offset`` and ``mask`` are supplied by the
    caller in ``forward``.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=True
    ):
        super(ModulatedDeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.with_bias = bias
        self.weight = nn.Parameter(torch.Tensor(
            out_channels,
            in_channels // groups,
            *self.kernel_size
        ))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            # Register a named placeholder so state_dict layout is stable.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by fan-in; bias (if any) starts at zero.
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, input, offset, mask):
        return modulated_deform_conv(
            input, offset, mask, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.deformable_groups)

    def __repr__(self):
        return "".join([
            "{}(".format(self.__class__.__name__),
            "in_channels={}, ".format(self.in_channels),
            "out_channels={}, ".format(self.out_channels),
            "kernel_size={}, ".format(self.kernel_size),
            "stride={}, ".format(self.stride),
            "dilation={}, ".format(self.dilation),
            "padding={}, ".format(self.padding),
            "groups={}, ".format(self.groups),
            "deformable_groups={}, ".format(self.deformable_groups),
            "bias={})".format(self.with_bias),
        ])
class ModulatedDeformConvPack(ModulatedDeformConv):
    """DCN v2 layer that predicts its own offsets and modulation mask.

    A plain ``nn.Conv2d`` over the input emits ``3 * k_h * k_w`` channels
    per deformable group: two offset maps and one mask map (passed through a
    sigmoid) per kernel position. Its weights start at zero, so offsets are
    initially zero.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 deformable_groups=1,
                 bias=True):
        super(ModulatedDeformConvPack, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, deformable_groups, bias)
        self.conv_offset_mask = nn.Conv2d(
            self.in_channels // self.groups,
            self.deformable_groups * 3 * self.kernel_size[0] *
            self.kernel_size[1],
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            bias=True)
        self.init_offset()

    def init_offset(self):
        # Zero init => zero offsets (and sigmoid(0)=0.5 mask) at start.
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, input):
        out = self.conv_offset_mask(input)
        # Split channels into x-offsets, y-offsets and the modulation mask.
        o1, o2, mask = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        return modulated_deform_conv(
            input, offset, mask, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.deformable_groups)
| 5,802 | 31.601124 | 78 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/engine/text_inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import os
import pickle
import subprocess
import time
import cv2
import numpy as np
import torch
from maskrcnn_benchmark.utils.chars import char2num, get_tight_rect, getstr_grid
from PIL import Image, ImageDraw
from tqdm import tqdm
from ..utils.comm import is_main_process, scatter_gather, synchronize
import pdb
# TO DO: format output with dictionnary
def compute_on_dataset(model, data_loader, device, cfg):
    """Run ``model`` over every batch of ``data_loader`` and collect outputs.

    Returns:
        results_dict: maps each image path to its prediction artifacts —
            ``[boxlist, char_mask, boxes, seq_words, seq_scores,
            detailed_seq_scores]`` when recognition heads are enabled
            (``CHAR_MASK_ON`` / ``SEQUENCE.SEQ_ON``), otherwise ``[boxlist]``.
        seg_results: one entry per batch with the segmentation proposals,
            rotated boxes, polygons, raw maps and scores; only populated
            when ``cfg.MODEL.SEG_ON``.
    """
    model.eval()
    results_dict = {}
    seg_results = []
    cpu_device = torch.device("cpu")
    # NOTE(review): total_time is never updated here — timing happens in
    # inference().
    total_time = 0
    for _, batch in tqdm(enumerate(data_loader)):
        images, targets, image_paths = batch
        images = images.to(device)
        with torch.no_grad():
            if cfg.MODEL.SEG_ON:
                predictions, proposals, seg_results_dict = model(
                    images
                )
                seg_results.append(
                    [image_paths, proposals, seg_results_dict['rotated_boxes'], seg_results_dict['polygons'], seg_results_dict['preds'], seg_results_dict['scores']]
                )
                # if cfg.MODEL.MASK_ON and predictions is not None:
                if predictions is not None:
                    if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
                        global_predictions = predictions[0]
                        char_predictions = predictions[1]
                        char_mask = char_predictions["char_mask"]
                        boxes = char_predictions["boxes"]
                        seq_words = char_predictions["seq_outputs"]
                        seq_scores = char_predictions["seq_scores"]
                        detailed_seq_scores = char_predictions["detailed_seq_scores"]
                        # Move box-level predictions to CPU before caching.
                        global_predictions = [o.to(cpu_device) for o in global_predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                    char_mask,
                                    boxes,
                                    seq_words,
                                    seq_scores,
                                    detailed_seq_scores,
                                ]
                            }
                        )
                    else:
                        global_predictions = [o.to(cpu_device) for o in predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                ]
                            }
                        )
            else:
                # No segmentation proposal stage: the model returns
                # predictions directly.
                predictions = model(images)
                if predictions is not None:
                    if not (cfg.MODEL.CHAR_MASK_ON and cfg.SEQUENCE.SEQ_ON):
                        global_predictions = predictions
                        global_predictions = [o.to(cpu_device) for o in global_predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                ]
                            }
                        )
                    else:
                        global_predictions = predictions[0]
                        char_predictions = predictions[1]
                        if cfg.MODEL.CHAR_MASK_ON:
                            char_mask = char_predictions["char_mask"]
                        else:
                            char_mask = None
                        boxes = char_predictions["boxes"]
                        seq_words = char_predictions["seq_outputs"]
                        seq_scores = char_predictions["seq_scores"]
                        detailed_seq_scores = char_predictions["detailed_seq_scores"]
                        global_predictions = [o.to(cpu_device) for o in global_predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                    char_mask,
                                    boxes,
                                    seq_words,
                                    seq_scores,
                                    detailed_seq_scores,
                                ]
                            }
                        )
    return results_dict, seg_results
def polygon2rbox(polygon, image_height, image_width):
    """Approximate a polygon by a tight rotated rectangle.

    The polygon is wrapped by its minimum-area rectangle (OpenCV), whose
    corner points are then ordered and clipped to the image bounds by
    ``get_tight_rect``. Returns a flat list of int coordinates.
    """
    points = np.array(polygon).reshape((-1, 2))
    min_rect = cv2.minAreaRect(points)
    corner_pts = np.array(cv2.boxPoints(min_rect), dtype="int")
    tight = get_tight_rect(corner_pts, 0, 0, image_height, image_width, 1)
    return [int(coord) for coord in tight]
def mask2polygon(mask, box, im_size, threshold=0.5, output_folder=None):
    """Turn a fixed-size instance mask into a polygon in image coordinates.

    The mask (assumes a 32x128 probability map — see comment below) is
    resized to the box, blurred, binarized at ``threshold``, cleaned up
    morphologically, and then converted to a polygon. Datasets with curved
    text ("total_text"/"cute80" in ``output_folder``) get a contour-based
    polygon; all others get a tight rotated rectangle from the convex hull.
    Returns ``None`` when no usable polygon can be extracted.
    """
    # mask 32*128
    image_width, image_height = im_size[0], im_size[1]
    box_h = box[3] - box[1]
    box_w = box[2] - box[0]
    cls_polys = (mask * 255).astype(np.uint8)
    poly_map = np.array(Image.fromarray(cls_polys).resize((box_w, box_h)))
    poly_map = poly_map.astype(np.float32) / 255
    poly_map = cv2.GaussianBlur(poly_map, (3, 3), sigmaX=3)
    ret, poly_map = cv2.threshold(poly_map, threshold, 1, cv2.THRESH_BINARY)
    if "total_text" in output_folder or "cute80" in output_folder:
        SE1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        poly_map = cv2.erode(poly_map, SE1)
        poly_map = cv2.dilate(poly_map, SE1)
        poly_map = cv2.morphologyEx(poly_map, cv2.MORPH_CLOSE, SE1)
        # cv2.findContours returns 3 values in OpenCV 3.x and 2 in 2.x/4.x;
        # the except branch handles the 2-value signature.
        try:
            _, contours, _ = cv2.findContours(
                (poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
            )
        except:
            contours, _ = cv2.findContours(
                (poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
            )
        if len(contours) == 0:
            # print(contours)
            # print(len(contours))
            return None
        # Keep only the largest contour by area.
        max_area = 0
        max_cnt = contours[0]
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > max_area:
                max_area = area
                max_cnt = cnt
        # perimeter = cv2.arcLength(max_cnt, True)
        epsilon = 0.01 * cv2.arcLength(max_cnt, True)
        approx = cv2.approxPolyDP(max_cnt, epsilon, True)
        pts = approx.reshape((-1, 2))
        # Shift from box-local to image coordinates.
        pts[:, 0] = pts[:, 0] + box[0]
        pts[:, 1] = pts[:, 1] + box[1]
        polygon = list(pts.reshape((-1,)))
        polygon = list(map(int, polygon))
        if len(polygon) < 6:
            # Fewer than 3 points is not a polygon.
            return None
    else:
        SE1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        poly_map = cv2.erode(poly_map, SE1)
        poly_map = cv2.dilate(poly_map, SE1)
        poly_map = cv2.morphologyEx(poly_map, cv2.MORPH_CLOSE, SE1)
        idy, idx = np.where(poly_map == 1)
        xy = np.vstack((idx, idy))
        xy = np.transpose(xy)
        hull = cv2.convexHull(xy, clockwise=True)
        # reverse order of points.
        if hull is None:
            return None
        hull = hull[::-1]
        # find minimum area bounding box.
        rect = cv2.minAreaRect(hull)
        corners = cv2.boxPoints(rect)
        corners = np.array(corners, dtype="int")
        pts = get_tight_rect(corners, box[0], box[1], image_height, image_width, 1)
        polygon = [x * 1.0 for x in pts]
        polygon = list(map(int, polygon))
    return polygon
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    """Gather per-GPU prediction dicts and merge them on the main process.

    Returns the merged dict on the main process; every other process gets
    ``None``.
    """
    gathered = scatter_gather(predictions_per_gpu)
    if not is_main_process():
        return
    merged = {}
    for partial in gathered:
        merged.update(partial)
    return merged
def format_output(out_dir, boxes, img_name):
    """Write one image's detection/recognition results to disk.

    Produces ``res_<name>.txt`` with one comma-separated line per instance
    plus one ``res_<name>_<i>.pkl`` per instance holding the character
    score arrays (keys ``seg_char_scores`` / ``seq_char_scores``).
    Each ``box`` entry is the result_log list built by
    ``prepare_results_for_evaluation``; curved-text datasets ("total_text"/
    "cute80" in ``out_dir``) carry the polygon length as the last element
    and use ';'-separated sections.
    """
    with open(
        os.path.join(out_dir, "res_" + img_name.split(".")[0] + ".txt"), "wt"
    ) as res:
        ## char score save dir
        ssur_name = os.path.join(out_dir, "res_" + img_name.split(".")[0])
        for i, box in enumerate(boxes):
            save_name = ssur_name + "_" + str(i) + ".pkl"
            save_dict = {}
            if "total_text" in out_dir or "cute80" in out_dir:
                # np.save(save_name, box[-2])
                save_dict["seg_char_scores"] = box[-3]
                save_dict["seq_char_scores"] = box[-2]
                # Sections: bbox ; polygon ; remaining fields , pickle path.
                box = (
                    ",".join([str(x) for x in box[:4]])
                    + ";"
                    + ",".join([str(x) for x in box[4 : 4 + int(box[-1])]])
                    + ";"
                    + ",".join([str(x) for x in box[4 + int(box[-1]) : -3]])
                    + ","
                    + save_name
                )
            else:
                save_dict["seg_char_scores"] = box[-2]
                save_dict["seq_char_scores"] = box[-1]
                # NOTE(review): np.save appends ".npy" and writes alongside
                # the pickle of the same scores written below.
                np.save(save_name, box[-1])
                box = ",".join([str(x) for x in box[:-2]]) + "," + save_name
            # protocol=2 keeps the pickles readable from Python 2 tooling.
            with open(save_name, "wb") as f:
                pickle.dump(save_dict, f, protocol=2)
            res.write(box + "\n")
def format_seg_output(results_dir, rotated_boxes_this_image, polygons_this_image, scores, img_name, ratio):
    """Write segmentation detections for one image to ``res_<name>.txt``.

    Args:
        results_dir: output directory; curved-text datasets ("total_text"/
            "cute80" in the path) get polygon output, others 4-point boxes.
        rotated_boxes_this_image: iterable of (N, 2) point arrays.
        polygons_this_image: iterable whose items hold a flat polygon
            (x0, y0, x1, y1, ...) at index 0.
        scores: per-detection confidences exposing ``.item()``.
        img_name: image file name; the extension is stripped.
        ratio: (height_ratio, width_ratio) mapping network coordinates back
            to original-image coordinates.
    """
    height_ratio, width_ratio = ratio
    with open(
        os.path.join(results_dir, "res_" + img_name.split(".")[0] + ".txt"), "wt"
    ) as res:
        if "total_text" in results_dir or "cute80" in results_dir:
            for i, box in enumerate(polygons_this_image):
                # Flat polygon: even indices are x, odd indices are y.
                box = box[0]
                box[0::2] = box[0::2] * width_ratio
                box[1::2] = box[1::2] * height_ratio
                result = ",".join([str(int(x)) for x in box])
                score = scores[i].item()
                res.write(result + ',' + str(score) + "\n")
        else:
            for i, box in enumerate(rotated_boxes_this_image):
                # box is an (N, 2) point array, so scale the x/y *columns*.
                # BUGFIX: the previous box[0::2] / box[1::2] slicing scaled
                # alternate rows, multiplying whole (x, y) points by one
                # ratio and mixing the two axes.
                box[:, 0] = box[:, 0] * width_ratio
                box[:, 1] = box[:, 1] * height_ratio
                result = ",".join([str(int(x[0])) + ',' + str(int(x[1])) for x in box])
                score = scores[i].item()
                res.write(result + ',' + str(score) + "\n")
def process_char_mask(char_masks, boxes, threshold=192):
    """Decode per-character mask maps into words and per-character scores.

    For each instance, ``getstr_grid`` converts the character probability
    maps restricted to the (integer) box into a text string, a word-level
    score, per-character scores and character polygons.

    Returns:
        (texts, rec_scores, rec_char_scores, char_polygons) — parallel
        lists with one entry per instance.
    """
    texts = []
    rec_scores = []
    rec_char_scores = []
    char_polygons = []
    for idx in range(char_masks.shape[0]):
        int_box = [int(coord) for coord in list(boxes[idx])]
        word, word_score, char_scores, polygons = getstr_grid(
            char_masks[idx, :, :, :].copy(), int_box, threshold=threshold
        )
        texts.append(word)
        rec_scores.append(word_score)
        rec_char_scores.append(char_scores)
        char_polygons.append(polygons)
    return texts, rec_scores, rec_char_scores, char_polygons
def creat_color_map(n_class, width):
    """Build a list of RGBA colors spread over the RGB cube.

    ``splits`` is the cube root of ``n_class`` rounded up; the red and
    green channels are sampled at ``splits`` evenly spaced levels in
    [0, width], the blue channel at ``splits - 1`` levels, and every color
    uses alpha 200. Note: ``n_class <= 1`` yields ``splits == 1`` and a
    ZeroDivisionError, matching the historical behavior.
    """
    splits = int(np.ceil(np.power(n_class * 1.0, 1.0 / 3)))

    def level(idx):
        # Map a grid index to a channel intensity in [0, width].
        return int(idx * width * 1.0 / (splits - 1))

    return [
        (level(i), level(j), level(k), 200)
        for i in range(splits)
        for j in range(splits)
        for k in range(splits - 1)
    ]
def visualization(image, polygons, resize_ratio, colors, char_polygons=None, words=None):
    """Draw detection polygons on ``image`` in place.

    Each polygon (flat x/y list) is closed by appending its first point and
    traced as a bright green line of width 5. ``resize_ratio``, ``colors``,
    ``char_polygons`` and ``words`` are kept for interface compatibility
    with the disabled per-character rendering and are currently unused.
    """
    draw = ImageDraw.Draw(image, "RGBA")
    outline = '#33FF33'
    for polygon in polygons:
        # Close the loop by repeating the first vertex; this mutates the
        # caller's list, matching the original behavior.
        polygon.append(polygon[0])
        polygon.append(polygon[1])
        draw.line(polygon, fill=outline, width=5)
def vis_seg_map(image_path, seg_map, rotated_boxes, polygons_this_image, proposals, vis_dir):
    """Save a visualization blending the segmentation map over the image.

    The first channel of ``seg_map`` (cropped to the proposal size, then
    resized to the original image) is alpha-blended with the image and the
    detected polygons/rotated boxes are drawn in green. Output goes to
    ``<vis_dir>/seg_<img_name>``.
    """
    img_name = image_path.split("/")[-1]
    image = cv2.imread(image_path)
    height, width, _ = image.shape
    seg_map = seg_map.data.cpu().numpy()
    img = Image.fromarray(image).convert("RGB")
    # height_ratio = height / seg_map.shape[1]
    # width_ratio = width / seg_map.shape[2]
    # print('seg_map.shape:', seg_map.shape)
    # print('image.shape:', image.shape)
    seg_image = (
        Image.fromarray((seg_map[0, :proposals.size[1], :proposals.size[0]] * 255).astype(np.uint8))
        .convert("RGB")
        .resize((width, height))
    )
    # 50/50 blend of segmentation heat map and original image.
    visu_image = Image.blend(seg_image, img, 0.5)
    img_draw = ImageDraw.Draw(visu_image)
    if "total_text" in vis_dir or "cute80" in vis_dir:
        # Curved-text datasets: draw the full polygons.
        for box in polygons_this_image:
            # box[:, 0] = box[:, 0]
            # box[:, 1] = box[:, 1]
            tuple_box = [tuple(x) for x in box[0].reshape(-1, 2).tolist()]
            tuple_box.append(tuple_box[0])
            img_draw.line(tuple_box, fill=(0, 255, 0), width=5)
    else:
        for box in rotated_boxes:
            # box[:, 0] = box[:, 0]
            # box[:, 1] = box[:, 1]
            tuple_box = [tuple(x) for x in box.tolist()]
            tuple_box.append(tuple_box[0])
            img_draw.line(tuple_box, fill=(0, 255, 0), width=5)
    visu_image.save(vis_dir + "/seg_" + img_name)
def prepare_results_for_evaluation(
    predictions, output_folder, model_name, seg_predictions=None, vis=False, cfg=None
):
    """Convert raw model outputs into evaluation files (and visualizations).

    Creates ``<model_name>_results`` / ``<model_name>_seg_results`` under
    ``output_folder`` (plus ``*_visu`` dirs when ``vis``), writes the
    segmentation detections per image, and — unless
    ``cfg.MODEL.TRAIN_DETECTION_ONLY`` — converts each detection's mask to
    a polygon and writes box/polygon/word/score records via
    ``format_output``.
    """
    results_dir = os.path.join(output_folder, model_name + "_results")
    if not os.path.isdir(results_dir):
        os.mkdir(results_dir)
    seg_results_dir = os.path.join(output_folder, model_name + "_seg_results")
    if not os.path.isdir(seg_results_dir):
        os.mkdir(seg_results_dir)
    if vis:
        visu_dir = os.path.join(output_folder, model_name + "_visu")
        if not os.path.isdir(visu_dir):
            os.mkdir(visu_dir)
        seg_visu_dir = os.path.join(output_folder, model_name + "_seg_visu")
        if not os.path.isdir(seg_visu_dir):
            os.mkdir(seg_visu_dir)
    # --- segmentation (proposal) outputs -------------------------------
    if len(seg_predictions) > 0:
        for seg_prediction in seg_predictions:
            image_paths, proposals, rotated_boxes, polygons, seg_maps, seg_scores = (
                seg_prediction[0],
                seg_prediction[1],
                seg_prediction[2],
                seg_prediction[3],
                seg_prediction[4],
                seg_prediction[5],
            )
            for batch_id in range(len(image_paths)):
                image_path = image_paths[batch_id]
                im_name = image_path.split("/")[-1]
                image = cv2.imread(image_path)
                height, width, _ = image.shape
                rotated_boxes_this_image = rotated_boxes[batch_id]
                polygons_this_image = polygons[batch_id]
                proposals_this_image = proposals[batch_id]
                seg_map = seg_maps[batch_id]
                seg_score = seg_scores[batch_id]
                height, width, _ = image.shape
                # Scale factors from network (proposal) space back to the
                # original image resolution.
                height_ratio = height / proposals_this_image.size[1]
                width_ratio = width / proposals_this_image.size[0]
                format_seg_output(seg_results_dir, rotated_boxes_this_image, polygons_this_image, seg_score, im_name, (height_ratio, width_ratio))
                if vis:
                    vis_seg_map(image_path, seg_map, rotated_boxes_this_image, polygons_this_image, proposals_this_image, seg_visu_dir)
    # --- per-instance detection/recognition outputs --------------------
    if (not cfg.MODEL.TRAIN_DETECTION_ONLY):
        for image_path, prediction in predictions.items():
            im_name = image_path.split("/")[-1]
            if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
                global_prediction, char_mask, boxes_char, seq_words, seq_scores, detailed_seq_scores = (
                    prediction[0],
                    prediction[1],
                    prediction[2],
                    prediction[3],
                    prediction[4],
                    prediction[5],
                )
                if char_mask is not None:
                    words, rec_scores, rec_char_scoress, char_polygons = process_char_mask(
                        char_mask, boxes_char
                    )
            else:
                global_prediction = prediction[0]
            test_image_width, test_image_height = global_prediction.size
            img = Image.open(image_path)
            width, height = img.size
            resize_ratio = float(height) / test_image_height
            # Rescale the boxlist to original-image coordinates.
            global_prediction = global_prediction.resize((width, height))
            boxes = global_prediction.bbox.tolist()
            if cfg.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX:
                scores = global_prediction.get_field("scores").tolist()
            if not cfg.MODEL.SEG.USE_SEG_POLY:
                masks = global_prediction.get_field("mask").cpu().numpy()
            else:
                masks = global_prediction.get_field("masks").get_polygons()
            result_logs = []
            polygons = []
            for k, box in enumerate(boxes):
                # Skip degenerate (sub-pixel) boxes.
                if box[2] - box[0] < 1 or box[3] - box[1] < 1:
                    continue
                box = list(map(int, box))
                if not cfg.MODEL.SEG.USE_SEG_POLY:
                    mask = masks[k, 0, :, :]
                    polygon = mask2polygon(
                        mask, box, img.size, threshold=0.5, output_folder=output_folder
                    )
                else:
                    polygon = list(masks[k].get_polygons()[0].cpu().numpy())
                    if not ("total_text" in output_folder or "cute80" in output_folder):
                        polygon = polygon2rbox(polygon, height, width)
                if polygon is None:
                    # NOTE(review): the fallback rectangle below is dead
                    # code — the `continue` skips this instance entirely.
                    polygon = [
                        box[0],
                        box[1],
                        box[2],
                        box[1],
                        box[2],
                        box[3],
                        box[0],
                        box[3],
                    ]
                    continue
                polygons.append(polygon)
                if cfg.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX:
                    score = scores[k]
                else:
                    score = 1.0
                if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
                    if char_mask is None:
                        # Placeholder recognition values when only the
                        # sequence head ran.
                        word = 'aaa'
                        rec_score = 1.0
                        char_score = None
                    else:
                        word = words[k]
                        rec_score = rec_scores[k]
                        char_score = rec_char_scoress[k]
                    seq_word = seq_words[k]
                    seq_char_scores = seq_scores[k]
                    seq_score = sum(seq_char_scores) / float(len(seq_char_scores))
                    detailed_seq_score = detailed_seq_scores[k]
                    detailed_seq_score = np.squeeze(np.array(detailed_seq_score), axis=1)
                else:
                    word = 'aaa'
                    rec_score = 1.0
                    char_score = [1.0, 1.0, 1.0]
                    seq_word = 'aaa'
                    seq_char_scores = [1.0, 1.0, 1.0]
                    seq_score = 1.0
                    detailed_seq_score = None
                if "total_text" in output_folder or "cute80" in output_folder:
                    # Curved-text format additionally records the polygon
                    # length so format_output can split the sections.
                    result_log = (
                        [int(x * 1.0) for x in box[:4]]
                        + polygon
                        + [word]
                        + [seq_word]
                        + [score]
                        + [rec_score]
                        + [seq_score]
                        + [char_score]
                        + [detailed_seq_score]
                        + [len(polygon)]
                    )
                else:
                    result_log = (
                        [int(x * 1.0) for x in box[:4]]
                        + polygon
                        + [word]
                        + [seq_word]
                        + [score]
                        + [rec_score]
                        + [seq_score]
                        + [char_score]
                        + [detailed_seq_score]
                    )
                result_logs.append(result_log)
            if vis:
                colors = creat_color_map(37, 255)
                if cfg.MODEL.CHAR_MASK_ON:
                    visualization(img, polygons, resize_ratio, colors, char_polygons, words)
                else:
                    visualization(img, polygons, resize_ratio, colors)
                img.save(os.path.join(visu_dir, im_name))
            format_output(results_dir, result_logs, im_name)
def inference(
    model,
    data_loader,
    iou_types=("bbox",),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    model_name=None,
    cfg=None,
):
    """Run evaluation end to end: predict, time, cache and format results.

    Computes predictions with ``compute_on_dataset``, saves them as
    ``<model_name>_<min_size>_predictions.pth`` (and ``_seg_predictions``)
    under ``output_folder``, then writes evaluation files via
    ``prepare_results_for_evaluation``. ``iou_types``, ``box_only``,
    ``expected_results`` and ``expected_results_sigma_tol`` are currently
    unused here.
    """
    # convert to a torch.device for efficiency
    model_name = model_name.split(".")[0] + "_" + str(cfg.INPUT.MIN_SIZE_TEST)
    predictions_path = os.path.join(output_folder, model_name + "_predictions.pth")
    seg_predictions_path = os.path.join(
        output_folder, model_name + "_seg_predictions.pth"
    )
    # Cached-prediction branch deliberately disabled (see commented
    # condition below); predictions are always recomputed.
    # if os.path.isfile(predictions_path) and os.path.isfile(seg_predictions_path):
    if False:
        predictions = torch.load(predictions_path)
        seg_predictions = torch.load(seg_predictions_path)
    else:
        device = torch.device(device)
        num_devices = (
            torch.distributed.get_world_size()
            if torch.distributed.is_initialized()
            else 1
        )
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        dataset = data_loader.dataset
        logger.info("Start evaluation on {} images".format(len(dataset)))
        start_time = time.time()
        predictions, seg_predictions = compute_on_dataset(
            model, data_loader, device, cfg
        )
        # wait for all processes to complete before measuring the time
        synchronize()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=total_time))
        logger.info(
            "Total inference time: {} ({} s / img per device, on {} devices)".format(
                total_time_str, total_time * num_devices / len(dataset), num_devices
            )
        )
        # predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        # if not is_main_process():
        #     return
        if output_folder:
            torch.save(predictions, predictions_path)
            torch.save(seg_predictions, seg_predictions_path)
    prepare_results_for_evaluation(
        predictions,
        output_folder,
        model_name,
        seg_predictions=seg_predictions,
        vis=cfg.TEST.VIS,
        cfg=cfg
    )
| 23,471 | 40.839572 | 164 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/engine/trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
import torch
from maskrcnn_benchmark.utils.comm import get_world_size, is_main_process
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
import torch.distributed as dist
from apex import amp
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.

    Keys are iterated in sorted order so every rank stacks the losses in
    the same sequence before ``dist.reduce`` — collective operations assume
    an identical tensor layout on all processes, which plain dict-insertion
    order does not guarantee.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        for k in sorted(loss_dict.keys()):
            loss_names.append(k)
            all_losses.append(loss_dict[k])
        all_losses = torch.stack(all_losses, dim=0)
        # Sum all ranks' losses onto rank 0.
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses
def do_train(
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    tb_logger,
    cfg,
    local_rank,
):
    """Main training loop with apex mixed precision and periodic checkpoints.

    Iterates ``data_loader`` starting at ``arguments["iteration"]``,
    steps the LR scheduler each iteration, backpropagates through
    ``amp.scale_loss``, logs smoothed metrics/TensorBoard scalars every
    ``cfg.SOLVER.DISPLAY_FREQ`` iterations, and saves a checkpoint every
    ``checkpoint_period`` iterations plus once at the end (rank 0 only).
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        data_time = time.time() - end
        arguments["iteration"] = iteration

        scheduler.step()

        images = images.to(device)
        targets = [target.to(device) for target in targets]

        loss_dict = model(images, targets)

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        # losses.backward()
        # Note: If mixed precision is not used, this ends up doing nothing
        # Otherwise apply loss scaling for mixed-precision recipe
        with amp.scale_loss(losses, optimizer) as scaled_losses:
            scaled_losses.backward()
        if cfg.SOLVER.USE_ADAM:
            # Adam runs with gradient clipping to keep updates stable.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)

        # ETA from the smoothed per-iteration time.
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))

        if local_rank == 0 and (iteration % cfg.SOLVER.DISPLAY_FREQ == 0 or iteration == (max_iter - 1)):
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
            for tag, value in loss_dict_reduced.items():
                tb_logger.scalar_summary(tag, value.item(), iteration)
        if local_rank == 0 and iteration % checkpoint_period == 0 and iteration > 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
    if local_rank == 0:
        # Final checkpoint after the last iteration.
        checkpointer.save("model_{:07d}".format(iteration), **arguments)
    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
| 4,397 | 34.184 | 105 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/c2_model_loading.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import pickle
from collections import OrderedDict
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [k.replace(".w", ".weight") for k in layer_keys]
layer_keys = [k.replace(".bn", "_bn") for k in layer_keys]
layer_keys = [k.replace(".b", ".bias") for k in layer_keys]
layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys]
layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys]
layer_keys = [k.replace("bbox.pred", "bbox_pred") for k in layer_keys]
layer_keys = [k.replace("cls.score", "cls_score") for k in layer_keys]
layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys]
# RPN / Faster RCNN
layer_keys = [k.replace(".biasbox", ".bbox") for k in layer_keys]
layer_keys = [k.replace("conv.rpn", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox.pred", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [k.replace("rpn.cls.logits", "rpn.cls_logits") for k in layer_keys]
# Affine-Channel -> BatchNorm enaming
layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys]
# Make torchvision-compatible
layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys]
layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys]
layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys]
layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys]
layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys]
layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys]
layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys]
return layer_keys
def _rename_fpn_weights(layer_keys, stage_names):
for mapped_idx, stage_name in enumerate(stage_names, 1):
suffix = ""
if mapped_idx < 4:
suffix = ".lateral"
layer_keys = [
k.replace("fpn.inner.layer{}.sum{}".format(stage_name, suffix), "fpn_inner{}".format(mapped_idx)) for k in layer_keys
]
layer_keys = [k.replace("fpn.layer{}.sum".format(stage_name), "fpn_layer{}".format(mapped_idx)) for k in layer_keys]
layer_keys = [k.replace("rpn.conv.fpn2", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox_pred.fpn2", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [
k.replace("rpn.cls_logits.fpn2", "rpn.cls_logits") for k in layer_keys
]
return layer_keys
def _rename_weights_for_resnet(weights, stage_names):
    """Convert a Caffe2 blob dict into a torch state dict with mapped names.

    Builds an old-name -> new-name map by piping the sorted keys through
    the basic ResNet, FPN, Mask/Keypoint R-CNN and RPN renamers, then
    returns an OrderedDict of ``torch.from_numpy`` tensors under the new
    names. ``*_momentum`` blobs are dropped.
    """
    original_keys = sorted(weights.keys())
    layer_keys = sorted(weights.keys())

    # for X-101, rename output to fc1000 to avoid conflicts afterwards
    layer_keys = [k if k != "pred_b" else "fc1000_b" for k in layer_keys]
    layer_keys = [k if k != "pred_w" else "fc1000_w" for k in layer_keys]

    # performs basic renaming: _ -> . , etc
    layer_keys = _rename_basic_resnet_weights(layer_keys)

    # FPN
    layer_keys = _rename_fpn_weights(layer_keys, stage_names)

    # Mask R-CNN
    layer_keys = [k.replace("mask.fcn.logits", "mask_fcn_logits") for k in layer_keys]
    layer_keys = [k.replace(".[mask].fcn", "mask_fcn") for k in layer_keys]
    layer_keys = [k.replace("conv5.mask", "conv5_mask") for k in layer_keys]

    # Keypoint R-CNN
    layer_keys = [k.replace("kps.score.lowres", "kps_score_lowres") for k in layer_keys]
    layer_keys = [k.replace("kps.score", "kps_score") for k in layer_keys]
    layer_keys = [k.replace("conv.fcn", "conv_fcn") for k in layer_keys]

    # Rename for our RPN structure
    layer_keys = [k.replace("rpn.", "rpn.head.") for k in layer_keys]

    # Sorting both lists identically keeps old/new keys aligned.
    key_map = {k: v for k, v in zip(original_keys, layer_keys)}

    logger = logging.getLogger(__name__)
    logger.info("Remapping C2 weights")
    # Longest key length, used only to align the log output below.
    max_c2_key_size = max([len(k) for k in original_keys if "_momentum" not in k])

    new_weights = OrderedDict()
    for k in original_keys:
        v = weights[k]
        if "_momentum" in k:
            continue
        # if 'fc1000' in k:
        #     continue
        w = torch.from_numpy(v)
        # if "bn" in k:
        #     w = w.view(1, -1, 1, 1)
        logger.info("C2 name: {: <{}} mapped name: {}".format(k, max_c2_key_size, key_map[k]))
        new_weights[key_map[k]] = w

    return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
logger = logging.getLogger(__name__)
logger.info("Remapping conv weights for deformable conv weights")
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*layer{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
if old_key.find(param) is -1:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
logger.info("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
state_dict[new_key] = state_dict[old_key]
del state_dict[old_key]
return state_dict
# Per-architecture stage-name fragments, passed to _rename_fpn_weights to
# locate the FPN sum blobs of each ResNet stage in Caffe2 checkpoints.
_C2_STAGE_NAMES = {
    "R-50": ["1.2", "2.3", "3.5", "4.2"],
    "R-101": ["1.2", "2.3", "3.22", "4.2"],
}
def load_c2_format(cfg, f):
    """Load Caffe2/Detectron weights from ``f`` and convert blob names.

    TODO: support architectures beyond the R-50 / R-101 C4/FPN variants.
    """
    raw_weights = _load_c2_pickled_weights(f)
    arch = cfg.MODEL.BACKBONE.CONV_BODY.replace("-C4", "").replace("-FPN", "")
    renamed = _rename_weights_for_resnet(raw_weights, _C2_STAGE_NAMES[arch])
    # Deformable-conv stages keep their inner conv under conv2.conv.
    renamed = _rename_conv_weights_for_deformable_conv_layers(renamed, cfg)
    return dict(model=renamed)
| 6,944 | 39.144509 | 129 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/metric_logger.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20):
        # Rolling window of the most recent values.
        self.deque = deque(maxlen=window_size)
        # Full history of every recorded value.
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one value in the window, the history and the running total."""
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    @property
    def median(self):
        """Median (torch semantics) of the values currently in the window."""
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        """Mean of the values currently in the window."""
        d = torch.tensor(list(self.deque))
        return d.mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count


class MetricLogger(object):
    """Collects named scalar metrics into per-name SmoothedValue trackers."""

    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one value per named metric; tensors are unwrapped first."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes, e.g. ``logger.loss.avg``.
        if attr in self.meters:
            return self.meters[attr]
        # BUGFIX: the original returned ``object.__getattr__(self, attr)``;
        # ``object`` has no ``__getattr__``, so the lookup raised a
        # misleading AttributeError about object.__getattr__ itself.
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
        )

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            )
        return self.delimiter.join(loss_str)
| 1,714 | 25.796875 | 82 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.c2_model_loading import load_c2_format
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.model_zoo import cache_url
class Checkpointer(object):
    def __init__(
        self,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        """Bundle model/optimizer/scheduler state for saving and loading.

        Args:
            model: module whose ``state_dict`` is saved/loaded.
            optimizer: optional optimizer checkpointed alongside the model.
            scheduler: optional LR scheduler checkpointed alongside the model.
            save_dir: directory for ``.pth`` files and the ``last_checkpoint``
                marker; saving is a no-op when empty.
            save_to_disk: gate for writing checkpoints (set falsy by the
                caller to disable writes, e.g. on non-primary processes —
                see save()).
            logger: defaults to this module's logger.
        """
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        if logger is None:
            logger = logging.getLogger(__name__)
        self.logger = logger
def save(self, name, **kwargs):
if not self.save_dir:
return
if not self.save_to_disk:
return
data = {}
data["model"] = self.model.state_dict()
if self.optimizer is not None:
data["optimizer"] = self.optimizer.state_dict()
if self.scheduler is not None:
data["scheduler"] = self.scheduler.state_dict()
data.update(kwargs)
save_file = os.path.join(self.save_dir, "{}.pth".format(name))
self.logger.info("Saving checkpoint to {}".format(save_file))
torch.save(data, save_file)
self.tag_last_checkpoint(save_file)
def load(self, f=None, resume=False):
if self.has_checkpoint():
# override argument with existing checkpoint
f = self.get_checkpoint_file()
if not f:
# no checkpoint could be found
self.logger.info("No checkpoint found. Initializing model from scratch")
return {}
self.logger.info("Loading checkpoint from {}".format(f))
checkpoint = self._load_file(f)
self._load_model(checkpoint)
if resume:
if "optimizer" in checkpoint and self.optimizer:
self.logger.info("Loading optimizer from {}".format(f))
self.optimizer.load_state_dict(checkpoint.pop("optimizer"))
if "scheduler" in checkpoint and self.scheduler:
self.logger.info("Loading scheduler from {}".format(f))
self.scheduler.load_state_dict(checkpoint.pop("scheduler"))
# return any further checkpoint data
return checkpoint
def has_checkpoint(self):
save_file = os.path.join(self.save_dir, "last_checkpoint")
return os.path.exists(save_file)
def get_checkpoint_file(self):
save_file = os.path.join(self.save_dir, "last_checkpoint")
try:
with open(save_file, "r") as f:
last_saved = f.read()
except IOError:
# if file doesn't exist, maybe because it has just been
# deleted by a separate process
last_saved = ""
return last_saved
def tag_last_checkpoint(self, last_filename):
save_file = os.path.join(self.save_dir, "last_checkpoint")
with open(save_file, "w") as f:
f.write(last_filename)
def _load_file(self, f):
return torch.load(f, map_location=torch.device("cpu"))
def _load_model(self, checkpoint):
load_state_dict(self.model, checkpoint.pop("model"))
class DetectronCheckpointer(Checkpointer):
    """Checkpointer that additionally resolves ``catalog://`` names, http(s)
    URLs, and Detectron's Caffe2 ``.pkl`` weight files."""
    def __init__(
        self,
        cfg,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        super(DetectronCheckpointer, self).__init__(
            model, optimizer, scheduler, save_dir, save_to_disk, logger
        )
        # Keep a private copy of the config; needed for Caffe2 conversion below.
        self.cfg = cfg.clone()
    def _load_file(self, f):
        """Resolve *f* (catalog://, URL, .pkl, or local path) and load it.

        Always returns a dict that contains at least a "model" key.
        """
        # catalog lookup
        if f.startswith("catalog://"):
            paths_catalog = import_file(
                "maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f
        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        # load native detectron.pytorch checkpoint
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            # raw state_dict files: wrap so callers can uniformly pop "model"
            loaded = dict(model=loaded)
        return loaded
| 4,813 | 33.385714 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/comm.py | # # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# """
# This file contains primitives for multi-gpu communication.
# This is useful when doing distributed training.
# """
# import os
# import pickle
# import tempfile
# import time
# import torch
# import torch.distributed as dist
# # def get_world_size():
# # if not dist.is_initialized():
# # return 1
# # return dist.get_world_size()
# #
# #
# # def is_main_process():
# # if not dist.is_initialized():
# # return True
# # return dist.get_rank() == 0
# #
# # def get_rank():
# # if not dist.is_initialized():
# # return 0
# # return dist.get_rank()
# #
# # def synchronize():
# # """
# # Helper function to synchronize between multiple processes when
# # using distributed training
# # """
# # if not dist.is_initialized():
# # return
# # world_size = dist.get_world_size()
# # rank = dist.get_rank()
# # if world_size == 1:
# # return
# #
# # def _send_and_wait(r):
# # if rank == r:
# # tensor = torch.tensor(0, device="cuda")
# # else:
# # tensor = torch.tensor(1, device="cuda")
# # dist.broadcast(tensor, r)
# # while tensor.item() == 1:
# # time.sleep(1)
# #
# # _send_and_wait(0)
# # # now sync on the main process
# # _send_and_wait(1)
# #
# #
# def _encode(encoded_data, data):
# # gets a byte representation for the data
# encoded_bytes = pickle.dumps(data)
# # convert this byte string into a byte tensor
# storage = torch.ByteStorage.from_buffer(encoded_bytes)
# tensor = torch.ByteTensor(storage).to("cuda")
# # encoding: first byte is the size and then rest is the data
# s = tensor.numel()
# assert s <= 255, "Can't encode data greater than 255 bytes"
# # put the encoded data in encoded_data
# encoded_data[0] = s
# encoded_data[1 : (s + 1)] = tensor
# def _decode(encoded_data):
# size = encoded_data[0]
# encoded_tensor = encoded_data[1 : (size + 1)].to("cpu")
# return pickle.loads(bytearray(encoded_tensor.tolist()))
# # TODO try to use tensor in shared-memory instead of serializing to disk
# # this involves getting the all_gather to work
# def scatter_gather(data):
# """
# This function gathers data from multiple processes, and returns them
# in a list, as they were obtained from each process.
# This function is useful for retrieving data from multiple processes,
# when launching the code with torch.distributed.launch
# Note: this function is slow and should not be used in tight loops, i.e.,
# do not use it in the training loop.
# Arguments:
# data: the object to be gathered from multiple processes.
# It must be serializable
# Returns:
# result (list): a list with as many elements as there are processes,
# where each element i in the list corresponds to the data that was
# gathered from the process of rank i.
# """
# # strategy: the main process creates a temporary directory, and communicates
# # the location of the temporary directory to all other processes.
# # each process will then serialize the data to the folder defined by
# # the main process, and then the main process reads all of the serialized
# # files and returns them in a list
# if not dist.is_initialized():
# return [data]
# synchronize()
# # get rank of the current process
# rank = dist.get_rank()
# # the data to communicate should be small
# data_to_communicate = torch.empty(256, dtype=torch.uint8, device="cuda")
# if rank == 0:
# # manually creates a temporary directory, that needs to be cleaned
# # afterwards
# tmp_dir = tempfile.mkdtemp()
# _encode(data_to_communicate, tmp_dir)
# synchronize()
# # the main process (rank=0) communicates the data to all processes
# dist.broadcast(data_to_communicate, 0)
# # get the data that was communicated
# tmp_dir = _decode(data_to_communicate)
# # each process serializes to a different file
# file_template = "file{}.pth"
# tmp_file = os.path.join(tmp_dir, file_template.format(rank))
# torch.save(data, tmp_file)
# # synchronize before loading the data
# synchronize()
# # only the master process returns the data
# if rank == 0:
# data_list = []
# world_size = dist.get_world_size()
# for r in range(world_size):
# file_path = os.path.join(tmp_dir, file_template.format(r))
# d = torch.load(file_path)
# data_list.append(d)
# # cleanup
# os.remove(file_path)
# # cleanup
# os.rmdir(tmp_dir)
# return data_list
# def get_world_size():
# if not dist.is_available():
# print('distributed is not available')
# return 1
# if not dist.is_initialized():
# print('distributed is not initialized')
# return 1
# return dist.get_world_size()
# def get_rank():
# if not dist.is_available():
# return 0
# if not dist.is_initialized():
# return 0
# return dist.get_rank()
# def is_main_process():
# return get_rank() == 0
# def synchronize():
# """
# Helper function to synchronize (barrier) among all processes when
# using distributed training
# """
# if not dist.is_available():
# return
# if not dist.is_initialized():
# return
# world_size = dist.get_world_size()
# if world_size == 1:
# return
# dist.barrier()
# def all_gather(data):
# """
# Run all_gather on arbitrary picklable data (not necessarily tensors)
# Args:
# data: any picklable object
# Returns:
# list[data]: list of data gathered from each rank
# """
# world_size = get_world_size()
# if world_size == 1:
# return [data]
# # serialized to a Tensor
# buffer = pickle.dumps(data)
# storage = torch.ByteStorage.from_buffer(buffer)
# tensor = torch.ByteTensor(storage).to("cuda")
# # obtain Tensor size of each rank
# local_size = torch.IntTensor([tensor.numel()]).to("cuda")
# size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
# dist.all_gather(size_list, local_size)
# size_list = [int(size.item()) for size in size_list]
# max_size = max(size_list)
# # receiving Tensor from all ranks
# # we pad the tensor because torch all_gather does not support
# # gathering tensors of different shapes
# tensor_list = []
# for _ in size_list:
# tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
# if local_size != max_size:
# padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
# tensor = torch.cat((tensor, padding), dim=0)
# dist.all_gather(tensor_list, tensor)
# data_list = []
# for size, tensor in zip(size_list, tensor_list):
# buffer = tensor.cpu().numpy().tobytes()[:size]
# data_list.append(pickle.loads(buffer))
# return data_list
# def reduce_dict(input_dict, average=True):
# """
# Args:
# input_dict (dict): all the values will be reduced
# average (bool): whether to do average or sum
# Reduce the values in the dictionary from all processes so that process with rank
# 0 has the averaged results. Returns a dict with the same fields as
# input_dict, after reduction.
# """
# world_size = get_world_size()
# if world_size < 2:
# return input_dict
# with torch.no_grad():
# names = []
# values = []
# # sort the keys so that they are consistent across processes
# for k in sorted(input_dict.keys()):
# names.append(k)
# values.append(input_dict[k])
# values = torch.stack(values, dim=0)
# dist.reduce(values, dst=0)
# if dist.get_rank() == 0 and average:
# # only main process gets accumulated, so only divide by
# # world_size in this case
# values /= world_size
# reduced_dict = {k: v for k, v in zip(names, values)}
# return reduced_dict
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Number of processes in the default group; 1 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of this process in the default group; 0 when not distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """Return True on the master (rank-0) process; always True single-process."""
    return get_rank() == 0
def synchronize():
    """Barrier across all workers; a no-op outside multi-process training."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def scatter_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank

    Note: payloads are exchanged as CUDA byte tensors, so the multi-process
    path requires a GPU. The single-process path is a cheap identity wrapper.
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # trim each rank's padding before unpickling its payload
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.

    Note: only rank 0 receives the reduced (and, if requested, averaged)
    values; other ranks get back their own unmodified contribution.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
| 11,687 | 29.83905 | 86 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/registry.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
def _register_generic(module_dict, module_name, module):
    """Insert *module* under *module_name*, refusing to overwrite an entry."""
    assert module_name not in module_dict
    module_dict[module_name] = module
class Registry(dict):
    '''
    A dict subclass that doubles as a module registry.

    Eg. creating a registry:
        some_registry = Registry({"default": default_module})

    There are two ways of registering new modules:
    1): normal way is just calling the register function:
        def foo():
            ...
        some_registry.register("foo_module", foo)
    2): used as a decorator when declaring the module:
        @some_registry.register("foo_module")
        @some_registry.register("foo_module_nickname")
        def foo():
            ...

    Access of a module is just like using a dictionary, eg:
        f = some_registry["foo_module"]
    '''
    def __init__(self, *args, **kwargs):
        super(Registry, self).__init__(*args, **kwargs)

    def register(self, module_name, module=None):
        '''Register *module* under *module_name*; with one argument act as a decorator.'''
        def _do_register(obj):
            # Duplicate names are a programming error, not a runtime condition.
            assert module_name not in self
            self[module_name] = obj
            return obj

        # used as function call
        if module is not None:
            _do_register(module)
            return
        # used as decorator
        return _do_register
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/model_zoo.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
try:
from torch.hub import _download_url_to_file
from torch.hub import urlparse
from torch.hub import HASH_REGEX
except ImportError:
from torch.utils.model_zoo import _download_url_to_file
from torch.utils.model_zoo import urlparse
from torch.utils.model_zoo import HASH_REGEX
from maskrcnn_benchmark.utils.comm import is_main_process
from maskrcnn_benchmark.utils.comm import synchronize
# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py
# but with a few improvements and modifications
def cache_url(url, model_dir=None, progress=True):
    r"""Loads the Torch serialized object at the given URL.
    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        progress (bool, optional): whether or not to display a progress bar to stderr
    Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        torch_home = os.path.expanduser(os.getenv("TORCH_HOME", "~/.torch"))
        model_dir = os.getenv("TORCH_MODEL_ZOO", os.path.join(torch_home, "models"))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        # workaround as pre-trained Caffe2 models from Detectron have all the same filename
        # so make the full path the filename by replacing / with _
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    # only the master process downloads; all ranks meet at synchronize() below
    if not os.path.exists(cached_file) and is_main_process():
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = HASH_REGEX.search(filename)
        if hash_prefix is not None:
            hash_prefix = hash_prefix.group(1)
            # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
            # which matches the hash PyTorch uses. So we skip the hash matching
            # if the hash_prefix is less than 6 characters
            if len(hash_prefix) < 6:
                hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    synchronize()
    return cached_file
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/logging.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import sys
from tensorboardX import SummaryWriter
def setup_logger(name, save_dir, distributed_rank=0):
    """Create a DEBUG-level logger writing to stdout and, optionally, save_dir/log.txt.

    Non-master ranks (distributed_rank > 0) get a bare logger with no handlers
    attached so that only one process emits output.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # don't log results for the non-master process
    if distributed_rank > 0:
        return logger
    formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)
    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, "log.txt"))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
class Logger(object):
    """Thin TensorBoard wrapper that is active only on the master process."""

    def __init__(self, log_dir, distributed_rank=0):
        """Create a SummaryWriter for *log_dir* on rank 0; do nothing elsewhere."""
        self.distributed_rank = distributed_rank
        if distributed_rank == 0:
            self.writer = SummaryWriter(log_dir)

    def scalar_summary(self, tag, value, step):
        """Log a scalar value; silently ignored on non-master ranks."""
        if self.distributed_rank != 0:
            return
        self.writer.add_scalar(tag, value, step)
| 1,252 | 28.833333 | 84 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/collect_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
    """Return the Pillow version, formatted to append to torch's env report."""
    return "\n Pillow ({})".format(PIL.__version__)
def collect_env_info():
    """Return PyTorch's pretty environment report with the Pillow version appended."""
    return get_pretty_env_info() + get_pil_version()
| 338 | 21.6 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/model_serialization.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
import logging
import torch
from maskrcnn_benchmark.utils.imports import import_file
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """
    Strategy: suppose that the models that we will create will have prefixes appended
    to each of its keys, for example due to an extra level of nesting that the original
    pre-trained weights from ImageNet won't contain. For example, model.state_dict()
    might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
    res2.conv1.weight. We thus want to match both parameters together.
    For that, we look for each model weight, look among all loaded keys if there is one
    that is a suffix of the current weight name, and use it if that's the case.
    If multiple matches exist, take the one with longest size
    of the corresponding name. For example, for the same model as before, the pretrained
    weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
    we want to match backbone[0].body.conv1.weight to conv1.weight, and
    backbone[0].body.res2.conv1.weight to res2.conv1.weight.

    Mutates *model_state_dict* in place; model keys with no matching loaded
    key keep their original values.
    """
    current_keys = sorted(list(model_state_dict.keys()))
    loaded_keys = sorted(list(loaded_state_dict.keys()))
    # get a matrix of string matches, where each (i, j) entry correspond to the size of the
    # loaded_key string, if it matches
    match_matrix = [
        len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys
    ]
    match_matrix = torch.as_tensor(match_matrix).view(
        len(current_keys), len(loaded_keys)
    )
    # best (longest-suffix) loaded key for every model key
    max_match_size, idxs = match_matrix.max(1)
    # remove indices that correspond to no-match
    idxs[max_match_size == 0] = -1
    # used for logging
    max_size = max([len(key) for key in current_keys]) if current_keys else 1
    max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1
    log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
    logger = logging.getLogger(__name__)
    for idx_new, idx_old in enumerate(idxs.tolist()):
        if idx_old == -1:
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        model_state_dict[key] = loaded_state_dict[key_old]
        logger.info(
            log_str_template.format(
                key,
                max_size,
                key_old,
                max_size_loaded,
                tuple(loaded_state_dict[key_old].shape),
            )
        )
def strip_prefix_if_present(state_dict, prefix):
    """Return a copy of *state_dict* with *prefix* removed from the front of
    every key, or the dict unchanged if any key lacks the prefix.

    BUG FIX: the original used ``key.replace(prefix, "")``, which removes
    *every* occurrence of the prefix (e.g. "module.a.module.b" wrongly became
    "a.b"). Only the leading occurrence should be stripped.
    """
    keys = sorted(state_dict.keys())
    if not all(key.startswith(prefix) for key in keys):
        return state_dict
    stripped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        # slice off exactly the leading prefix, nothing else
        stripped_state_dict[key[len(prefix):]] = value
    return stripped_state_dict
def load_state_dict(model, loaded_state_dict):
    """Load *loaded_state_dict* into *model* with tolerant suffix key matching."""
    model_state_dict = model.state_dict()
    # if the state_dict comes from a model that was wrapped in a
    # DataParallel or DistributedDataParallel during serialization,
    # remove the "module" prefix before performing the matching
    loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.")
    align_and_update_state_dicts(model_state_dict, loaded_state_dict)
    # use strict loading: model_state_dict now holds every key the model expects
    model.load_state_dict(model_state_dict)
| 3,464 | 41.777778 | 91 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/chars.py | import os
import cv2
import numpy as np
def char2num(char):
    """Map a character to its class id.

    Digits '0'-'9' map to 1-10, letters (case-insensitive) to 11-36, and
    anything else to 0 (background/unknown).
    """
    if char in "0123456789":
        return ord(char) - ord("0") + 1
    if char in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
        return ord(char.lower()) - ord("a") + 11
    return 0
def num2char(num):
    """Inverse of char2num: 0 -> '_', 1-10 -> '0'-'9', 11-36 -> 'a'-'z'."""
    alphabet = "_0123456789abcdefghijklmnopqrstuvwxyz"
    return alphabet[num]
def getstr_grid(seg, box, threshold=192):
    """Decode a character-segmentation map into a string.

    Channel 0 of *seg* is treated as background; its inverse is binarized in
    seg2text to find character blobs. Returns
    (string, mean_char_score, per-char class scores, char polygons).
    """
    background = 255 - (seg[0] * 255).astype(np.uint8)
    class_map = np.argmax(seg, axis=0).astype(np.uint8)
    return seg2text(
        background.astype(np.uint8), class_map, seg, box, threshold=threshold
    )
def seg2text(gray, mask, seg, box, threshold=192):
    ## input numpy
    """Decode one word's character-segmentation maps into a text string.

    Args:
        gray: (H, W) map, high where a character is present; binarized below.
        mask: per-pixel argmax map - received but not used in this body;
            kept for interface compatibility.
        seg: class score maps; channels 1..36 are the character classes, as
            shown by the ``seg[1:, ...].reshape((36, -1))`` pooling below.
        box: (x1, y1, x2, y2) of the word box in the full image, used to map
            character polygons back into image coordinates.
        threshold: binarization threshold applied to *gray*.

    Returns:
        (string, mean_score, per_char_class_scores, char_polygons)
    """
    img_h, img_w = gray.shape
    box_w = box[2] - box[0]
    box_h = box[3] - box[1]
    # scale factors from the mask grid back to the original box size
    ratio_h = float(box_h) / img_h
    ratio_w = float(box_w) / img_w
    # SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    # gray = cv2.erode(gray,SE1)
    # gray = cv2.dilate(gray,SE1)
    # gray = cv2.morphologyEx(gray,cv2.MORPH_CLOSE,SE1)
    ret, thresh = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns
    # (contours, hierarchy) - handle both.
    try:
        _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    except:
        contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    chars = []
    scores = []
    char_polygons = []
    for i in range(len(contours)):
        char = {}
        # rasterize this contour so class scores can be pooled under it
        temp = np.zeros((img_h, img_w)).astype(np.uint8)
        cv2.drawContours(temp, [contours[i]], 0, (255), -1)
        x, y, w, h = cv2.boundingRect(contours[i])
        c_x, c_y = x + w / 2, y + h / 2
        perimeter = cv2.arcLength(contours[i], True)
        epsilon = 0.01 * cv2.arcLength(contours[i], True)
        approx = cv2.approxPolyDP(contours[i], epsilon, True)
        pts = approx.reshape((-1, 2))
        # map polygon points from mask coordinates into image coordinates
        pts[:, 0] = pts[:, 0] * ratio_w + box[0]
        pts[:, 1] = pts[:, 1] * ratio_h + box[1]
        polygon = list(pts.reshape((-1,)))
        polygon = list(map(int, polygon))
        if len(polygon) >= 6:
            # need at least 3 points to form a valid polygon
            char_polygons.append(polygon)
        # x1 = x * ratio_w + box[0]
        # y1 = y * ratio_h + box[1]
        # x3 = (x + w) * ratio_w + box[0]
        # y3 = (y + h) * ratio_h + box[1]
        # polygon = [x1, y1, x3, y1, x3, y3, x1, y3]
        # average each of the 36 class maps over this contour's pixels
        regions = seg[1:, temp == 255].reshape((36, -1))
        cs = np.mean(regions, axis=1)
        sym = num2char(np.argmax(cs.reshape((-1))) + 1)
        char["x"] = c_x
        char["y"] = c_y
        char["s"] = sym
        char["cs"] = cs.reshape((-1, 1))
        scores.append(np.max(char["cs"], axis=0)[0])
        chars.append(char)
    # read characters left to right by blob center x
    chars = sorted(chars, key=lambda x: x["x"])
    string = ""
    css = []
    for char in chars:
        string = string + char["s"]
        css.append(char["cs"])
    if len(scores) > 0:
        score = sum(scores) / len(scores)
    else:
        score = 0.00
    if not css:
        # keep np.hstack below from failing when no character was found
        css = [0.0]
    return string, score, np.hstack(css), char_polygons
# def get_tight_rect(points, start_x, start_y, image_height, image_width, scale):
# points = list(points)
# ps = sorted(points, key=lambda x: x[0])
#
# if ps[1][1] > ps[0][1]:
# px1 = ps[0][0] * scale + start_x
# py1 = ps[0][1] * scale + start_y
# px4 = ps[1][0] * scale + start_x
# py4 = ps[1][1] * scale + start_y
# else:
# px1 = ps[1][0] * scale + start_x
# py1 = ps[1][1] * scale + start_y
# px4 = ps[0][0] * scale + start_x
# py4 = ps[0][1] * scale + start_y
# if ps[3][1] > ps[2][1]:
# px2 = ps[2][0] * scale + start_x
# py2 = ps[2][1] * scale + start_y
# px3 = ps[3][0] * scale + start_x
# py3 = ps[3][1] * scale + start_y
# else:
# px2 = ps[3][0] * scale + start_x
# py2 = ps[3][1] * scale + start_y
# px3 = ps[2][0] * scale + start_x
# py3 = ps[2][1] * scale + start_y
#
# if px1 < 0:
# px1 = 1
# if px1 > image_width:
# px1 = image_width - 1
# if px2 < 0:
# px2 = 1
# if px2 > image_width:
# px2 = image_width - 1
# if px3 < 0:
# px3 = 1
# if px3 > image_width:
# px3 = image_width - 1
# if px4 < 0:
# px4 = 1
# if px4 > image_width:
# px4 = image_width - 1
#
# if py1 < 0:
# py1 = 1
# if py1 > image_height:
# py1 = image_height - 1
# if py2 < 0:
# py2 = 1
# if py2 > image_height:
# py2 = image_height - 1
# if py3 < 0:
# py3 = 1
# if py3 > image_height:
# py3 = image_height - 1
# if py4 < 0:
# py4 = 1
# if py4 > image_height:
# py4 = image_height - 1
# return [px1, py1, px2, py2, px3, py3, px4, py4]
def get_tight_rect(points, start_x, start_y, image_height, image_width, scale):
    """Order four points as top-left, top-right, bottom-right, bottom-left,
    map them into the full image (scale then offset), and clamp each
    coordinate to [1, dim - 1].

    Returns [x1, y1, x2, y2, x3, y3, x4, y4] in TL, TR, BR, BL order.
    """
    pts = sorted(list(points), key=lambda p: p[0])
    # Left pair: strictly-greater y picks the bottom corner; on a tie the
    # second point is treated as top-left (matches the original comparison).
    if pts[1][1] > pts[0][1]:
        top_left, bottom_left = pts[0], pts[1]
    else:
        top_left, bottom_left = pts[1], pts[0]
    # Right pair, same rule.
    if pts[3][1] > pts[2][1]:
        top_right, bottom_right = pts[2], pts[3]
    else:
        top_right, bottom_right = pts[3], pts[2]
    coords = []
    for px, py in (top_left, top_right, bottom_right, bottom_left):
        coords.append(min(max(px * scale + start_x, 1), image_width - 1))
        coords.append(min(max(py * scale + start_y, 1), image_height - 1))
    return coords
| 6,419 | 31.1 | 89 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/__init__.py | 0 | 0 | 0 | py | |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/miscellaneous.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import errno
import os
def mkdir(path):
    """Create *path* (with parents), tolerating an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # A concurrent worker may have created it first; only re-raise
        # genuinely unexpected errors.
        if exc.errno == errno.EEXIST:
            return
        raise
| 228 | 18.083333 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from maskrcnn_benchmark.utils.imports import import_file
def setup_environment():
    """Run optional user-supplied environment setup.

    When TORCH_DETECTRON_ENV_MODULE names a Python source file, load it and
    run its setup_environment(); otherwise this is a no-op.
    """
    custom_module_path = os.environ.get("TORCH_DETECTRON_ENV_MODULE")
    if not custom_module_path:
        # The default setup is a no-op
        return
    setup_custom_environment(custom_module_path)
def setup_custom_environment(custom_module_path):
    """Load *custom_module_path* as a module and invoke its setup_environment()."""
    module = import_file("maskrcnn_benchmark.utils.env.custom_module", custom_module_path)
    setup_fn = getattr(module, "setup_environment", None)
    # callable(None) is False, so a missing attribute also trips the assert.
    assert callable(setup_fn), (
        "Custom environment module defined in {} does not have the "
        "required callable attribute 'setup_environment'."
    ).format(custom_module_path)
    setup_fn()
# Force environment setup when this module is imported
setup_environment()
| 1,249 | 31.894737 | 90 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/imports.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import importlib
import importlib.util
import sys
# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
def import_file(module_name, file_path, make_importable=False):
    """Import the Python source at *file_path* under the name *module_name*.

    When *make_importable* is true the module is also installed in
    sys.modules, so later ``import module_name`` statements resolve to it.
    """
    module_spec = importlib.util.spec_from_file_location(module_name, file_path)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    if make_importable:
        sys.modules[module_name] = loaded
    return loaded
| 598 | 38.933333 | 164 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_data_loader
| 108 | 35.333333 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/collate_batch.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.structures.image_list import to_image_list, to_image_target_list
class BatchCollator(object):
    """
    From a list of (image, target, img_id) samples produced by the dataset,
    build the batched images and targets. Intended as the DataLoader's
    collate_fn. Images are padded into a single ImageList (optionally with
    sizes divisible by ``size_divisible``); targets and ids stay as tuples.
    """

    def __init__(self, size_divisible=0):
        self.size_divisible = size_divisible

    def __call__(self, batch):
        images, targets, img_ids = zip(*batch)
        batched_images = to_image_list(images, self.size_divisible)
        return batched_images, targets, img_ids
| 1,080 | 37.607143 | 115 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/build.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import logging
import torch.utils.data
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_benchmark.utils.imports import import_file
from . import datasets as D
from . import samplers
from .collate_batch import BatchCollator
from .transforms import build_transforms
def build_dataset(cfg, dataset_list, transforms, dataset_catalog, is_train=True):
    """Instantiate the datasets named in ``dataset_list``.

    Arguments:
        cfg: global config node (reads ``DATASETS.IGNORE_DIFFICULT`` and,
            when mixing, ``DATASETS.RATIOS``).
        dataset_list (list[str]): names of the datasets, e.g.
            coco_2014_train, coco_2014_val, etc.
        transforms (callable): transforms to apply to each (image, target)
            sample.
        dataset_catalog (DatasetCatalog): knows how to construct a dataset
            from its name.
        is_train (bool): whether to set up the dataset for training or
            testing.

    Returns:
        for testing, the list of constructed datasets; for training, a
        one-element list holding a single (possibly mixed) dataset.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError(
            "dataset_list should be a list of strings, got {}".format(dataset_list))

    datasets = []
    for name in dataset_list:
        entry = dataset_catalog.get(name)
        factory = getattr(D, entry["factory"])
        args = entry["args"]
        # COCODataset drops images without annotations during training only.
        if entry["factory"] == "COCODataset":
            args["remove_images_without_annotations"] = is_train
        args["transforms"] = transforms
        args["ignore_difficult"] = cfg.DATASETS.IGNORE_DIFFICULT
        datasets.append(factory(**args))

    # Testing: keep the datasets separate, one loader each.
    if not is_train:
        return datasets

    # Training: fold everything into a single dataset.
    combined = datasets[0]
    if len(datasets) > 1:
        combined = D.MixDataset(datasets, cfg.DATASETS.RATIOS)
    return [combined]
def make_data_sampler(dataset, shuffle, distributed):
    """Pick the index sampler for ``dataset``.

    Distributed runs get a DistributedSampler (which shards indices across
    processes); otherwise a Random or Sequential sampler depending on
    ``shuffle``.
    """
    if distributed:
        return samplers.DistributedSampler(dataset, shuffle=shuffle)
    if shuffle:
        return torch.utils.data.sampler.RandomSampler(dataset)
    return torch.utils.data.sampler.SequentialSampler(dataset)
def _quantize(x, bins):
bins = sorted(bins.copy())
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
img_info = dataset.get_img_info(i)
aspect_ratio = float(img_info["height"]) / float(img_info["width"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def make_batch_data_sampler(
    dataset, sampler, aspect_grouping, images_per_batch, num_iters=None, start_iter=0
):
    """Wrap ``sampler`` into a batch sampler.

    When ``aspect_grouping`` is non-empty, images are batched with others of
    similar aspect ratio (grouped by quantized height/width); otherwise a
    plain BatchSampler is used. When ``num_iters`` is given, the batch
    sampler is further wrapped to yield exactly that many batches, starting
    from ``start_iter``.
    """
    if aspect_grouping:
        edges = aspect_grouping
        if not isinstance(edges, (list, tuple)):
            edges = [edges]
        group_ids = _quantize(_compute_aspect_ratios(dataset), edges)
        batch_sampler = samplers.GroupedBatchSampler(
            sampler, group_ids, images_per_batch, drop_uneven=False
        )
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_batch, drop_last=False
        )
    if num_iters is not None:
        batch_sampler = samplers.IterationBasedBatchSampler(
            batch_sampler, num_iters, start_iter
        )
    return batch_sampler
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
    """Build the data loader(s) described by ``cfg``.

    Arguments:
        cfg: global config node; reads the SOLVER/TEST batch sizes,
            DATALOADER options, DATASETS lists and PATHS_CATALOG.
        is_train (bool): training mode — returns a single shuffled loader
            running for SOLVER.MAX_ITER iterations; otherwise one loader
            per test dataset.
        is_distributed (bool): use a DistributedSampler when True.
        start_iter (int): iteration to resume from (training only; reset to
            0 for testing).

    Returns:
        a single DataLoader when ``is_train`` is True, else a list of
        DataLoaders (one per test dataset).
    """
    num_gpus = get_world_size()
    if is_train:
        images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        # BUGFIX: the message must be one parenthesized expression. The old
        # code ended the assert after the first string, leaving the
        # ".format(...)" line as a dead statement and the message unformatted.
        assert images_per_batch % num_gpus == 0, (
            "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(images_per_batch, num_gpus)
        )
        images_per_gpu = images_per_batch // num_gpus
        shuffle = True
        num_iters = cfg.SOLVER.MAX_ITER
    else:
        images_per_batch = cfg.TEST.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, (
            "TEST.IMS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(images_per_batch, num_gpus)
        )
        images_per_gpu = images_per_batch // num_gpus
        # Distributed evaluation shuffles so per-process shards stay balanced.
        shuffle = bool(is_distributed)
        num_iters = None
        start_iter = 0

    if images_per_gpu > 1:
        logger = logging.getLogger(__name__)
        logger.warning(
            "When using more than one image per GPU you may encounter "
            "an out-of-memory (OOM) error if your GPU does not have "
            "sufficient memory. If this happens, you can reduce "
            "SOLVER.IMS_PER_BATCH (for training) or "
            "TEST.IMS_PER_BATCH (for inference). For training, you must "
            "also adjust the learning rate and schedule length according "
            "to the linear scaling rule. See for example: "
            "https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
        )

    # group images which have similar aspect ratio. In this case, we only
    # group in two cases: those with width / height > 1, and the other way around,
    # but the code supports more general grouping strategy
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []

    paths_catalog = import_file(
        "maskrcnn_benchmark.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST

    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train)

    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        batch_sampler = make_batch_data_sampler(
            dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)
    if is_train:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
| 6,740 | 37.301136 | 143 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.