repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
pylops | pylops-master/pylops/signalprocessing/fredholm1.py | __all__ = ["Fredholm1"]
import numpy as np
from pylops import LinearOperator
from pylops.utils.backend import get_array_module
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, NDArray
class Fredholm1(LinearOperator):
    r"""Fredholm integral of first kind.
    Implement a multi-dimensional Fredholm integral of first kind. Note that if
    the integral is two dimensional, this can be directly implemented using
    :class:`pylops.basicoperators.MatrixMult`. A multi-dimensional
    Fredholm integral can be performed as a :class:`pylops.basicoperators.BlockDiag`
    operator of a series of :class:`pylops.basicoperators.MatrixMult`. However,
    here we take advantage of the structure of the kernel and perform it in a
    more efficient manner.
    Parameters
    ----------
    G : :obj:`numpy.ndarray`
        Multi-dimensional convolution kernel of size
        :math:`[n_{\text{slice}} \times n_x \times n_y]`
    nz : :obj:`int`, optional
        Additional dimension of model
    saveGt : :obj:`bool`, optional
        Save ``G`` and ``G.H`` to speed up the computation of adjoint
        (``True``) or create ``G.H`` on-the-fly (``False``)
        Note that ``saveGt=True`` will double the amount of required memory
    usematmul : :obj:`bool`, optional
        Use :func:`numpy.matmul` (``True``) or for-loop with :func:`numpy.dot`
        (``False``). As it is not possible to define which approach is more
        performant (this is highly dependent on the size of ``G`` and input
        arrays as well as the hardware used in the computation), we advise users
        to time both methods for their specific problem prior to making a
        choice.
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved
        explicitly (``True``) or not (``False``)
    Notes
    -----
    A multi-dimensional Fredholm integral of first kind can be expressed as
    .. math::
        d(k, x, z) = \int{G(k, x, y) m(k, y, z) \,\mathrm{d}y}
        \quad \forall k=1,\ldots,n_{\text{slice}}
    on the other hand its adjoint is expressed as
    .. math::
        m(k, y, z) = \int{G^*(k, y, x) d(k, x, z) \,\mathrm{d}x}
        \quad \forall k=1,\ldots,n_{\text{slice}}
    In discrete form, this operator can be seen as a block-diagonal
    matrix multiplication:
    .. math::
        \begin{bmatrix}
            \mathbf{G}_{k=1} & \mathbf{0} & \ldots & \mathbf{0} \\
            \mathbf{0} & \mathbf{G}_{k=2} & \ldots & \mathbf{0} \\
            \vdots & \vdots & \ddots & \vdots \\
            \mathbf{0} & \mathbf{0} & \ldots & \mathbf{G}_{k=N}
        \end{bmatrix}
        \begin{bmatrix}
            \mathbf{m}_{k=1} \\
            \mathbf{m}_{k=2} \\
            \vdots \\
            \mathbf{m}_{k=N}
        \end{bmatrix}
    """
    def __init__(
        self,
        G: NDArray,
        nz: int = 1,
        saveGt: bool = True,
        usematmul: bool = True,
        dtype: DTypeLike = "float64",
        name: str = "F",
    ) -> None:
        self.nz = nz
        self.nsl, self.nx, self.ny = G.shape
        # model is (nslice, ny, nz); data is (nslice, nx, nz)
        dims = (self.nsl, self.ny, self.nz)
        dimsd = (self.nsl, self.nx, self.nz)
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name)
        self.G = G
        if saveGt:
            # precompute the conjugate transpose of each slice once
            # (doubles memory but speeds up every adjoint application)
            self.GT = G.transpose((0, 2, 1)).conj()
        self.usematmul = usematmul
    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        ncp = get_array_module(x)
        # drop trailing singleton dimension (nz == 1 case)
        x = x.squeeze()
        if self.usematmul:
            if self.nz == 1:
                # matmul requires an explicit trailing dim to batch over slices
                x = x[..., ncp.newaxis]
            y = ncp.matmul(self.G, x)
        else:
            # slice-by-slice products via dot
            y = ncp.squeeze(ncp.zeros((self.nsl, self.nx, self.nz), dtype=self.dtype))
            for isl in range(self.nsl):
                y[isl] = ncp.dot(self.G[isl], x[isl])
        return y
    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        ncp = get_array_module(x)
        x = x.squeeze()
        if self.usematmul:
            if self.nz == 1:
                x = x[..., ncp.newaxis]
            if hasattr(self, "GT"):
                # use precomputed G^H (saveGt=True)
                y = ncp.matmul(self.GT, x)
            else:
                # on-the-fly adjoint without materializing G^H:
                # (x^H G)^H == G^H x, computed via two cheap transposes
                # y = ncp.matmul(self.G.transpose((0, 2, 1)).conj(), x)
                y = (
                    ncp.matmul(x.transpose(0, 2, 1).conj(), self.G)
                    .transpose(0, 2, 1)
                    .conj()
                )
        else:
            y = ncp.squeeze(ncp.zeros((self.nsl, self.ny, self.nz), dtype=self.dtype))
            if hasattr(self, "GT"):
                for isl in range(self.nsl):
                    y[isl] = ncp.dot(self.GT[isl], x[isl])
            else:
                for isl in range(self.nsl):
                    # same identity as above, applied per slice
                    # y[isl] = ncp.dot(self.G[isl].conj().T, x[isl])
                    y[isl] = ncp.dot(x[isl].T.conj(), self.G[isl]).T.conj()
        return y.ravel()
| 5,263 | 34.093333 | 86 | py |
pylops | pylops-master/pylops/signalprocessing/sliding3d.py | __all__ = [
"sliding3d_design",
"Sliding3D",
]
import logging
from typing import Tuple
from pylops import LinearOperator
from pylops.basicoperators import BlockDiag, Diagonal, HStack, Restriction
from pylops.signalprocessing.sliding2d import _slidingsteps
from pylops.utils.tapers import taper3d
from pylops.utils.typing import InputDimsLike, NDArray
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
def sliding3d_design(
    dimsd: Tuple[int, int, int],
    nwin: Tuple[int, int],
    nover: Tuple[int, int],
    nop: Tuple[int, int, int],
) -> Tuple[
    Tuple[int, int],
    Tuple[int, int, int],
    Tuple[Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]],
    Tuple[Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]],
]:
    """Design Sliding3D operator
    Helper routine to be run prior to creating a
    :class:`pylops.signalprocessing.Sliding3D` operator. Given the shape of
    the data (``dimsd``), the window size (``nwin``), the overlap (``nover``),
    and the size of the operator acting in the model space (``nop``), it
    identifies the number of windows and the start/end indices of every
    model and data patch.
    Parameters
    ----------
    dimsd : :obj:`tuple`
        Shape of 2-dimensional data.
    nwin : :obj:`tuple`
        Number of samples of window.
    nover : :obj:`tuple`
        Number of samples of overlapping part of window.
    nop : :obj:`tuple`
        Size of model in the transformed domain.
    Returns
    -------
    nwins : :obj:`tuple`
        Number of windows.
    dims : :obj:`tuple`
        Shape of 2-dimensional model.
    mwins_inends : :obj:`tuple`
        Start and end indices for model patches (stored as tuple of tuples).
    dwins_inends : :obj:`tuple`
        Start and end indices for data patches (stored as tuple of tuples).
    """
    # overlapping windows over the two spatial axes of the data
    din0, dend0 = _slidingsteps(dimsd[0], nwin[0], nover[0])
    din1, dend1 = _slidingsteps(dimsd[1], nwin[1], nover[1])
    dwins_inends = ((din0, dend0), (din1, dend1))
    nwins0, nwins1 = len(din0), len(din1)
    nwins = (nwins0, nwins1)
    # non-overlapping windows tiling the model domain
    dims = (nwins0 * nop[0], nwins1 * nop[1], nop[2])
    min0, mend0 = _slidingsteps(dims[0], nop[0], 0)
    min1, mend1 = _slidingsteps(dims[1], nop[1], 0)
    mwins_inends = ((min0, mend0), (min1, mend1))
    # report the patching layout to the user
    logging.warning("%d-%d windows required...", nwins0, nwins1)
    logging.warning(
        "data wins - start:%s, end:%s / start:%s, end:%s",
        din0,
        dend0,
        din1,
        dend1,
    )
    logging.warning(
        "model wins - start:%s, end:%s / start:%s, end:%s",
        min0,
        mend0,
        min1,
        mend1,
    )
    return nwins, dims, mwins_inends, dwins_inends
def Sliding3D(
    Op: LinearOperator,
    dims: InputDimsLike,
    dimsd: InputDimsLike,
    nwin: Tuple[int, int],
    nover: Tuple[int, int],
    nop: Tuple[int, int, int],
    tapertype: str = "hanning",
    nproc: int = 1,
    name: str = "P",
) -> LinearOperator:
    """3D Sliding transform operator.
    Apply a transform operator ``Op`` repeatedly to patches of the model
    vector in forward mode and patches of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into patches
    each patch is transformed, and patches are then recombined in a sliding
    window fashion. Both model and data should be 3-dimensional
    arrays in nature as they are internally reshaped and interpreted as
    3-dimensional arrays. Each patch contains in fact a portion of the
    array in the first and second dimensions (and the entire third dimension).
    This operator can be used to perform local, overlapping transforms (e.g.,
    :obj:`pylops.signalprocessing.FFTND`
    or :obj:`pylops.signalprocessing.Radon3D`) of 3-dimensional arrays.
    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to first run ``sliding3d_design`` to obtain
       the corresponding ``dims`` and number of windows.
    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire data.
       The start and end indices of each window will be displayed and returned
       with running ``sliding3d_design``.
    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dims : :obj:`tuple`
        Shape of 3-dimensional model. Note that ``dims[0]`` and ``dims[1]``
        should be multiple of the model sizes of the transform in the
        first and second dimensions
    dimsd : :obj:`tuple`
        Shape of 3-dimensional data
    nwin : :obj:`tuple`
        Number of samples of window
    nover : :obj:`tuple`
        Number of samples of overlapping part of window
    nop : :obj:`tuple`
        Number of samples in axes of transformed domain associated
        to spatial axes in the data
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    nproc : :obj:`int`, optional
        Number of processes used to evaluate the N operators in parallel
        using ``multiprocessing``. If ``nproc=1``, work in serial mode.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator
    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).
    """
    # data windows (overlapping) along the two spatial axes
    dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0])
    dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1])
    nwins0 = len(dwin0_ins)
    nwins1 = len(dwin1_ins)
    nwins = nwins0 * nwins1
    # check windows: total model size implied by the windows must match dims
    if nwins * Op.shape[1] // dims[2] != dims[0] * dims[1]:
        raise ValueError(
            f"Model shape (dims={dims}) is not consistent with chosen "
            f"number of windows. Run sliding3d_design to identify the "
            f"correct number of windows for the current "
            "model size..."
        )
    # create tapers used to blend overlapping data windows
    if tapertype is not None:
        tap = taper3d(dimsd[2], nwin, nover, tapertype=tapertype)
    # transform to apply: one (optionally tapered) Op per window
    if tapertype is None:
        OOp = BlockDiag([Op for _ in range(nwins)], nproc=nproc)
    else:
        OOp = BlockDiag([Diagonal(tap.ravel()) * Op for _ in range(nwins)], nproc=nproc)
    # adjoint Restrictions scatter each patch back to its window position;
    # hstack recombines windows along the second spatial axis
    hstack = HStack(
        [
            Restriction(
                (nwin[0], dimsd[1], dimsd[2]),
                range(win_in, win_end),
                axis=1,
                dtype=Op.dtype,
            ).H
            for win_in, win_end in zip(dwin1_ins, dwin1_ends)
        ]
    )
    combining1 = BlockDiag([hstack] * nwins0)
    # recombine windows along the first spatial axis
    combining0 = HStack(
        [
            Restriction(
                dimsd,
                range(win_in, win_end),
                axis=0,
                dtype=Op.dtype,
            ).H
            for win_in, win_end in zip(dwin0_ins, dwin0_ends)
        ]
    )
    Sop = LinearOperator(combining0 * combining1 * OOp)
    # expose a 5D model view (window grid x per-window model) to the user
    Sop.dims, Sop.dimsd = (
        nwins0,
        nwins1,
        int(dims[0] // nwins0),
        int(dims[1] // nwins1),
        dims[2],
    ), dimsd
    Sop.name = name
    return Sop
| 7,683 | 32.701754 | 113 | py |
pylops | pylops-master/pylops/signalprocessing/_radon2d_numba.py | import os
import numpy as np
from numba import jit
# detect whether to use parallel or not
numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1"))
parallel = True if numba_threads != 1 else False
@jit(nopython=True)
def _linear_numba(x, t, px):
    """Linear parametric curve: intercept ``t`` plus moveout ``px`` per offset."""
    return t + px * x
@jit(nopython=True)
def _parabolic_numba(x, t, px):
    """Parabolic parametric curve: intercept ``t`` plus curvature ``px`` times offset squared."""
    return t + px * x**2
@jit(nopython=True)
def _hyperbolic_numba(x, t, px):
    """Hyperbolic parametric curve with intercept ``t``; here ``px`` acts as a velocity-like divisor."""
    return np.sqrt(t**2 + (x / px) ** 2)
@jit(nopython=True, nogil=True)
def _indices_2d_numba(f, x, px, t, nt, interp=True):
    """Compute time and space indices of parametric line in ``f`` function
    using numba. Refer to ``_indices_2d`` for full documentation.
    """
    tdecscan = f(x, t, px)
    # keep only offsets whose curve time falls inside the time axis
    # (one extra sample is reserved at the end when interpolating)
    if not interp:
        xscan = (tdecscan >= 0) & (tdecscan < nt)
    else:
        xscan = (tdecscan >= 0) & (tdecscan < nt - 1)
    tscanfs = tdecscan[xscan]
    tscan = np.zeros(len(tscanfs))
    dtscan = np.zeros(len(tscanfs))
    for it, tscanf in enumerate(tscanfs):
        # integer part -> sample index; fractional part -> interpolation weight
        tscan[it] = int(tscanf)
        if interp:
            dtscan[it] = tscanf - tscan[it]
    return xscan, tscan, dtscan
@jit(nopython=True, parallel=parallel, nogil=True)
def _indices_2d_onthefly_numba(f, x, px, ip, t, nt, interp=True):
    """Wrapper around _indices_2d to allow on-the-fly computation of
    parametric curves using numba
    """
    # NOTE(review): tscan is float32 while dtscan defaults to float64 —
    # presumably float32 to halve the table memory; confirm intentional
    tscan = np.full(len(x), np.nan, dtype=np.float32)
    if interp:
        dtscan = np.full(len(x), np.nan)
    else:
        dtscan = None
    # compute indices for the ip-th slope only and scatter them back onto
    # the full offset axis (NaN where the curve exits the time axis)
    xscan, tscan1, dtscan1 = _indices_2d_numba(f, x, px[ip], t, nt, interp=interp)
    tscan[xscan] = tscan1
    if interp:
        dtscan[xscan] = dtscan1
    return xscan, tscan, dtscan
@jit(nopython=True, parallel=parallel, nogil=True)
def _create_table_numba(f, x, pxaxis, nt, npx, nx, interp):
    """Create look up table using numba"""
    # table holds the (integer) time index of the curve for every
    # (slope, time, offset) triplet; dtable the interpolation weights;
    # NaN marks offsets where the curve falls outside the time axis
    table = np.full((npx, nt, nx), np.nan, dtype=np.float32)
    dtable = np.full((npx, nt, nx), np.nan)
    for ipx in range(npx):
        px = pxaxis[ipx]
        for it in range(nt):
            xscans, tscan, dtscan = _indices_2d_numba(f, x, px, it, nt, interp=interp)
            # tscan/dtscan are compact (valid offsets only): walk them with
            # a separate counter while scanning the boolean mask
            itscan = 0
            for ixscan, xscan in enumerate(xscans):
                if xscan:
                    table[ipx, it, ixscan] = tscan[itscan]
                    if interp:
                        dtable[ipx, it, ixscan] = dtscan[itscan]
                    itscan += 1
    return table, dtable
| 2,416 | 28.839506 | 86 | py |
pylops | pylops-master/pylops/signalprocessing/convolvend.py | __all__ = ["ConvolveND"]
from typing import Optional, Union
import numpy as np
from numpy.core.multiarray import normalize_axis_index
from pylops import LinearOperator
from pylops.utils._internal import _value_or_sized_to_tuple
from pylops.utils.backend import (
get_array_module,
get_convolve,
get_correlate,
to_cupy_conditional,
)
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray
class ConvolveND(LinearOperator):
    r"""ND convolution operator.
    Apply n-dimensional convolution with a compact filter to model
    (and data) along the ``axes`` of a n-dimensional array.
    Parameters
    ----------
    dims : :obj:`list` or :obj:`int`
        Number of samples for each dimension
    h : :obj:`numpy.ndarray`
        nd compact filter to be convolved to input signal
    offset : :obj:`tuple`, optional
        Indices of the center of the compact filter
    axes : :obj:`int`, optional
        .. versionadded:: 2.0.0
        Axes along which convolution is applied
    method : :obj:`str`, optional
        Method used to calculate the convolution (``direct`` or ``fft``).
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved
        explicitly (``True``) or not (``False``)
    Notes
    -----
    The ConvolveND operator applies n-dimensional convolution
    between the input signal :math:`d(x_1, x_2, ..., x_N)` and a compact
    filter kernel :math:`h(x_1, x_2, ..., x_N)` in forward model. This is a
    straighforward extension to multiple dimensions of
    :obj:`pylops.signalprocessing.Convolve2D` operator.
    """
    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        h: NDArray,
        offset: Optional[InputDimsLike] = None,
        axes: InputDimsLike = (-2, -1),
        method: str = "fft",
        dtype: DTypeLike = "float64",
        name: str = "C",
    ):
        dims = _value_or_sized_to_tuple(dims)
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name)
        # normalize axes to non-negative indices (all axes when axes is None)
        self.axes = (
            np.arange(len(self.dims))
            if axes is None
            else np.array([normalize_axis_index(ax, len(self.dims)) for ax in axes])
        )
        self.h = h
        hshape = np.array(self.h.shape)
        # padding
        if offset is None:
            offsetpad = np.zeros(self.h.ndim, dtype=int)
        else:
            offsetpad = np.asarray(offset, dtype=int)
        # shift (in samples) between the filter centre and the requested offset
        self.offset = 2 * (hshape // 2 - offsetpad)
        pad = [(0, 0) for _ in range(self.h.ndim)]
        dopad = False
        for inh, nh in enumerate(hshape):
            if nh % 2 == 0:
                # even-length filters have no true centre sample
                self.offset[inh] -= 1
            if self.offset[inh] != 0:
                # zero-pad on one side so the filter is re-centred at offset
                pad[inh] = (
                    self.offset[inh] if self.offset[inh] > 0 else 0,
                    -self.offset[inh] if self.offset[inh] < 0 else 0,
                )
                dopad = True
        if dopad:
            ncp = get_array_module(h)
            self.h = ncp.pad(self.h, pad, mode="constant")
        self.nh = self.h.shape
        # find out which directions are used for convolution and define offsets
        if len(self.dims) != len(self.nh):
            # embed the filter in a broadcastable shape with singleton
            # dimensions along the axes not being convolved
            dimsh = np.ones(len(self.dims), dtype=int)
            for iax, ax in enumerate(self.axes):
                dimsh[ax] = self.nh[iax]
            self.h = self.h.reshape(dimsh)
        # convolve and correlate functions
        self.convolve = get_convolve(h)
        self.correlate = get_correlate(h)
        self.method = method
    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # correct type of h if different from x and choose methods accordingly
        if type(self.h) is not type(x):
            self.h = to_cupy_conditional(x, self.h)
            self.convolve = get_convolve(self.h)
            self.correlate = get_correlate(self.h)
        return self.convolve(x, self.h, mode="same", method=self.method)
    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        # adjoint of convolution is correlation
        # correct type of h if different from x and choose methods accordingly
        if type(self.h) is not type(x):
            self.h = to_cupy_conditional(x, self.h)
            self.convolve = get_convolve(self.h)
            self.correlate = get_correlate(self.h)
        return self.correlate(x, self.h, mode="same", method=self.method)
| 4,683 | 33.189781 | 84 | py |
pylops | pylops-master/pylops/signalprocessing/__init__.py | """
Signal processing
=================
The subpackage signalprocessing provides linear operators for several signal
processing algorithms with forward and adjoint functionalities.
A list of operators present in pylops.signalprocessing:
Convolve1D 1D convolution operator.
Convolve2D 2D convolution operator.
ConvolveND ND convolution operator.
NonStationaryConvolve1D 1D nonstationary convolution operator.
NonStationaryConvolve2D 2D nonstationary convolution operator.
NonStationaryFilters1D 1D nonstationary filter estimation operator.
NonStationaryFilters2D 2D nonstationary filter estimation operator.
Interp Interpolation operator.
Bilinear Bilinear interpolation operator.
FFT One dimensional Fast-Fourier Transform.
FFT2D Two dimensional Fast-Fourier Transform.
FFTND N-dimensional Fast-Fourier Transform.
Shift Fractional Shift operator.
DWT One dimensional Wavelet operator.
DWT2D Two dimensional Wavelet operator.
DCT Discrete Cosine Transform.
Seislet Two dimensional Seislet operator.
Radon2D Two dimensional Radon transform.
Radon3D Three dimensional Radon transform.
Sliding1D 1D Sliding transform operator.
Sliding2D 2D Sliding transform operator.
Sliding3D 3D Sliding transform operator.
Patch2D 2D Patching transform operator.
Patch3D 3D Patching transform operator.
Fredholm1 Fredholm integral of first kind.
"""
from .fft import *
from .fft2d import *
from .fftnd import *
from .convolve1d import *
from .convolvend import *
from .convolve2d import *
from .nonstatconvolve1d import *
from .nonstatconvolve2d import *
from .shift import *
from .interp import *
from .bilinear import *
from .radon2d import *
from .radon3d import *
from .chirpradon2d import *
from .chirpradon3d import *
from .sliding1d import *
from .sliding2d import *
from .sliding3d import *
from .patch2d import *
from .patch3d import *
from .fredholm1 import *
from .dwt import *
from .dwt2d import *
from .seislet import *
from .dct import *
__all__ = [
"FFT",
"FFT2D",
"FFTND",
"Shift",
"Convolve1D",
"ConvolveND",
"Convolve2D",
"NonStationaryConvolve1D",
"NonStationaryConvolve2D",
"NonStationaryFilters1D",
"NonStationaryFilters2D",
"Interp",
"Bilinear",
"Radon2D",
"Radon3D",
"ChirpRadon2D",
"ChirpRadon3D",
"Sliding1D",
"Sliding2D",
"Sliding3D",
"Patch2D",
"Patch3D",
"Fredholm1",
"DWT",
"DWT2D",
"Seislet",
"DCT",
]
| 3,069 | 32.010753 | 80 | py |
pylops | pylops-master/pylops/signalprocessing/_chirpradon3d.py | import numpy as np
from pylops.utils.backend import get_array_module
from pylops.utils.typing import NDArray
try:
import pyfftw
except ImportError:
pyfftw = None
def _chirp_radon_3d(
    data: NDArray, dt: float, dy: float, dx: float, pmax: NDArray, mode: str = "f"
) -> NDArray:
    r"""3D Chirp Radon transform
    Applies 3D Radon transform using Fast Fourier Transform and Chirp
    functions. (mode='f': forward, 'a': adjoint, and 'i': inverse). See
    Chirp3DRadon operator docstring for more details.
    Parameters
    ----------
    data : :obj:`np.ndarray`
        3D input data of size :math:`[n_y \times n_x \times n_t]`
    dt : :obj:`float`
        Time sampling :math:`dt`
    dy : :obj:`float`
        Spatial sampling in :math:`y` direction :math:`dy`
    dx : :obj:`float`
        Spatial sampling in :math:`x` direction :math:`dx`
    pmax : :obj:`np.ndarray`
        Two element array :math:`(p_{y,max}, p_{x,max})` of :math:`\tan`
        of maximum stacking angles in :math:`y` and :math:`x` directions
        :math:`(\tan(\alpha_{y,max}), \tan(\alpha_{x,max}))`. If one operates
        in terms of minimum velocity :math:`c_0`, then
        :math:`p_{y,max}=c_0dy/dt` and :math:`p_{x,max}=c_0dx/dt`
    mode : :obj:`str`, optional
        Mode of operation, 'f': forward, 'a': adjoint, and 'i': inverse
    Returns
    -------
    g : :obj:`np.ndarray`
        3D array of size :math:`[n_{y} \times n_{x} \times n_t]`
    """
    ncp = get_array_module(data)
    # define sign for mode (forward uses negative chirp exponent)
    sign = -1.0 if mode == "f" else 1.0
    # data size
    (ny, nx, nt) = data.shape
    # find dtype of input and matching complex dtype
    dtype = ncp.real(data).dtype
    cdtype = (ncp.ones(1, dtype=dtype) + 1j * ncp.ones(1, dtype=dtype)).dtype
    # frequency axis
    omega = (ncp.fft.fftfreq(nt, 1 / nt) / (nt * dt)).reshape((1, nt)).astype(dtype)
    # slowness samplings
    dp1 = 2 * dt * pmax[0] / dy / nx
    dp2 = 2 * dt * pmax[1] / dx / ny
    # spatial axes (squared, on the doubled grid used for linear convolution)
    x = (
        (ncp.fft.fftfreq(2 * nx, 1 / (2 * nx)) ** 2)
        .reshape((1, 2 * nx, 1))
        .astype(dtype)
    )
    y = (
        (ncp.fft.fftfreq(2 * ny, 1 / (2 * ny)) ** 2)
        .reshape((2 * ny, 1, 1))
        .astype(dtype)
    )
    # K coefficients (chirp modulation factors per axis)
    K01 = ncp.exp(sign * np.pi * 1j * dp1 * dy * omega * x).reshape(
        (1, int(2 * nx), nt)
    )
    K02 = ncp.exp(sign * np.pi * 1j * dp2 * dx * omega * y).reshape(
        (int(2 * ny), 1, nt)
    )
    # K conj coefficients, cropped back to the original grid
    K1 = ncp.conj(ncp.fft.fftshift(K01, axes=(1,)))[:, int(nx / 2) : int(3 * nx / 2), :]
    K2 = ncp.conj(ncp.fft.fftshift(K02, axes=(0,)))[int(ny / 2) : int(3 * ny / 2), :, :]
    # perform transform: modulate, convolve with the chirps along each
    # spatial axis (via zero-padded FFTs), then demodulate
    h = ncp.zeros((2 * ny, 2 * nx, nt)).astype(cdtype)
    h[0:ny, 0:nx, :] = ncp.fft.fftn(data, axes=(2,)) * K1 * K2
    g = ncp.fft.ifftn(
        ncp.fft.fftn(h, axes=(1,)) * ncp.fft.fftn(K01, axes=(1,)), axes=(1,)
    )
    g = ncp.fft.ifftn(
        ncp.fft.fftn(g, axes=(0,)) * ncp.fft.fftn(K02, axes=(0,)), axes=(0,)
    )
    if mode == "i":
        # inverse additionally applies the |omega|^2 rho filter and scaling
        g = ncp.fft.ifftn(
            g[0:ny, 0:nx, :] * K1 * K2 * abs(omega) ** 2 * dp1 * dp2 * dy * dx,
            axes=(2,),
        ).real
    else:
        g = ncp.fft.ifftn(g[0:ny, 0:nx, :] * K1 * K2, axes=(2,)).real
    return g
def _chirp_radon_3d_fftw(
    data: NDArray,
    dt: float,
    dy: float,
    dx: float,
    pmax: NDArray,
    mode: str = "f",
    **kwargs_fftw
) -> NDArray:
    r"""3D Chirp Radon transform with pyfftw
    Applies 3D Radon transform using Fast Fourier Transform and Chirp
    functions. (mode='f': forward, 'a': adjoint, and 'i': inverse). See
    Chirp3DRadon operator docstring for more details.
    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        3D input data of size :math:`[n_y \times n_x \times n_t]`
    dt : :obj:`float`
        Time sampling :math:`dt`
    dy : :obj:`float`
        Spatial sampling in :math:`y` direction :math:`dy`
    dx : :obj:`float`
        Spatial sampling in :math:`x` direction :math:`dx`
    pmax : :obj:`np.ndarray`
        Two element array :math:`(p_{y,max}, p_{x,max})` of :math:`\tan`
        of maximum stacking angles in :math:`y` and :math:`x` directions
        :math:`(\tan(\alpha_{y,max}), \tan(\alpha_{x,max}))`. If one operates
        in terms of minimum velocity :math:`c_0`, then
        :math:`p_{y,max}=c_0dy/dt` and :math:`p_{x,max}=c_0dx/dt`
    mode : :obj:`str`, optional
        Mode of operation, 'f': forward, 'a': adjoint, and 'i': inverse
    **kwargs_fftw : :obj:`int`, optional
        Additional arguments to pass to pyFFTW computations
        (recommended: ``flags=('FFTW_ESTIMATE', ), threads=NTHREADS``)
    Returns
    -------
    g : :obj:`np.ndarray`
        3D array of size :math:`[n_{y} \times n_{x} \times n_t]`
    """
    # define sign for mode (forward uses negative chirp exponent)
    sign = -1.0 if mode == "f" else 1.0
    # data size
    (ny, nx, nt) = data.shape
    # find dtype of input and matching complex dtype
    dtype = np.real(data).dtype
    cdtype = (np.ones(1, dtype=dtype) + 1j * np.ones(1, dtype=dtype)).dtype
    # frequency axis
    omega = (np.fft.fftfreq(nt, 1 / nt) / (nt * dt)).reshape((1, nt)).astype(dtype)
    # slowness samplings
    # NOTE(review): pmax indices are swapped with respect to _chirp_radon_3d
    # (dp1 uses pmax[1] here, pmax[0] there) — confirm which ordering is intended
    dp1 = 2 * dt * pmax[1] / dy / nx
    dp2 = 2 * dt * pmax[0] / dx / ny
    # pyfftw plans: pre-allocate aligned buffers and plan each FFT once
    data = pyfftw.byte_align(data, n=None, dtype=dtype)
    K1 = pyfftw.empty_aligned((1, nx, nt), dtype=cdtype)
    K2 = pyfftw.empty_aligned((ny, 1, nt), dtype=cdtype)
    K01 = pyfftw.empty_aligned((1, 2 * nx, nt), dtype=cdtype)
    K02 = pyfftw.empty_aligned((2 * ny, 1, nt), dtype=cdtype)
    K01Out = pyfftw.empty_aligned((1, 2 * nx, nt), dtype=cdtype)
    K02Out = pyfftw.empty_aligned((2 * ny, 1, nt), dtype=cdtype)
    hw = pyfftw.zeros_aligned((2 * ny, 2 * nx, nt), dtype=cdtype)
    hw_short = pyfftw.empty_aligned((ny, nx, nt), dtype=cdtype)
    gw = pyfftw.empty_aligned((2 * ny, 2 * nx, nt), dtype=cdtype)
    gw_short = pyfftw.empty_aligned((ny, nx, nt), dtype=cdtype)
    fft_object_t = pyfftw.FFTW(data.astype(cdtype), hw_short, axes=(2,), **kwargs_fftw)
    fft_object_x1 = pyfftw.FFTW(hw, gw, axes=(1,), **kwargs_fftw)
    fft_object_K01w_x1 = pyfftw.FFTW(K01, K01Out, axes=(1,), **kwargs_fftw)
    ifft_object_x1 = pyfftw.FFTW(
        hw, gw, axes=(1,), direction="FFTW_BACKWARD", **kwargs_fftw
    )
    fft_object_x2 = pyfftw.FFTW(gw, hw, axes=(0,), **kwargs_fftw)
    fft_object_K02w_x2 = pyfftw.FFTW(K02, K02Out, axes=(0,), **kwargs_fftw)
    ifft_object_x2 = pyfftw.FFTW(
        hw, gw, axes=(0,), direction="FFTW_BACKWARD", **kwargs_fftw
    )
    ifft_object_t = pyfftw.FFTW(
        hw_short, gw_short, axes=(2,), direction="FFTW_BACKWARD", **kwargs_fftw
    )
    # spatial axes (squared, on the doubled grid used for linear convolution)
    xw = (
        (np.fft.fftfreq(2 * nx, 1 / (2 * nx)) ** 2)
        .reshape((1, 2 * nx, 1))
        .astype(dtype)
    )
    yw = (
        (np.fft.fftfreq(2 * ny, 1 / (2 * ny)) ** 2)
        .reshape((2 * ny, 1, 1))
        .astype(dtype)
    )
    # chirp modulation factors per axis
    K01[:, :, :] = np.exp(sign * np.pi * 1j * dp1 * dy * omega * xw).reshape(
        (1, int(2 * nx), nt)
    )
    K02[:, :, :] = np.exp(sign * np.pi * 1j * dp2 * dx * omega * yw).reshape(
        (int(2 * ny), 1, nt)
    )
    # NOTE(review): the crop windows below are shifted by +1 with respect to
    # the slices used in _chirp_radon_3d — confirm this offset is intended
    K1[:, :, :] = np.conj(np.fft.fftshift(K01, axes=(1,)))[
        :, int(nx / 2) + 1 : int(3 * nx / 2) + 1, :
    ]
    K2[:, :, :] = np.conj(np.fft.fftshift(K02, axes=(0,)))[
        int(ny / 2) + 1 : int(3 * ny / 2) + 1, :, :
    ]
    # modulate, convolve with the chirps along each spatial axis, demodulate
    hw[0:ny, 0:nx, :] = fft_object_t() * K1 * K2
    gw[:, :, :] = ifft_object_x1(fft_object_x1() * fft_object_K01w_x1())
    gw[:, :, :] = ifft_object_x2(fft_object_x2() * fft_object_K02w_x2())
    if mode == "i":
        # inverse additionally applies the |omega|^2 rho filter and scaling
        g = ifft_object_t(
            gw[0:ny, 0:nx, :] * K1 * K2 * abs(omega) ** 2 * dp1 * dp2 * dy * dx
        ).real
    else:
        g = ifft_object_t(gw[0:ny, 0:nx, :] * K1 * K2).real
    return g
| 7,719 | 32.419913 | 88 | py |
pylops | pylops-master/pylops/signalprocessing/chirpradon3d.py | __all__ = ["ChirpRadon3D"]
import logging
import numpy as np
from pylops import LinearOperator
from pylops.utils import deps
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, NDArray
from ._chirpradon3d import _chirp_radon_3d
pyfftw_message = deps.pyfftw_import("the chirpradon3d module")
if pyfftw_message is None:
import pyfftw
from ._chirpradon3d import _chirp_radon_3d_fftw
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
class ChirpRadon3D(LinearOperator):
    r"""3D Chirp Radon transform
    Apply Radon forward (and adjoint) transform using Fast Fourier Transform
    and Chirp functions to a 3-dimensional array of size
    :math:`[n_y \times n_x \times n_t]` (both in forward and adjoint mode).
    Note that forward and adjoint are swapped compared to the time-space
    implementation in :class:`pylops.signalprocessing.Radon3D` and a direct
    `inverse` method is also available for this implementation.
    Parameters
    ----------
    taxis : :obj:`np.ndarray`
        Time axis
    hyaxis : :obj:`np.ndarray`
        Slow spatial axis
    hxaxis : :obj:`np.ndarray`
        Fast spatial axis
    pmax : :obj:`np.ndarray`
        Two element array :math:`(p_{y,\text{max}}, p_{x,\text{max}})` of :math:`\tan`
        of maximum stacking angles in :math:`y` and :math:`x` directions
        :math:`(\tan(\alpha_{y,\text{max}}), \tan(\alpha_{x,\text{max}}))`. If one operates
        in terms of minimum velocity :math:`c_0`, then
        :math:`p_{y,\text{max}}=c_0\,\mathrm{d}y/\mathrm{d}t` and :math:`p_{x,\text{max}}=c_0\,\mathrm{d}x/\mathrm{d}t`
    engine : :obj:`str`, optional
        Engine used for fft computation (``numpy`` or ``fftw``). If ``fftw``
        is requested but pyFFTW is not installed, the operator falls back to
        ``numpy`` and a warning is logged.
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    **kwargs_fftw
        Arbitrary keyword arguments for :py:class:`pyfftw.FFTW`
        (recommended: ``flags=('FFTW_ESTIMATE', ), threads=NTHREADS``)
    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)
    Raises
    ------
    NotImplementedError
        If ``engine`` is neither ``numpy`` nor ``fftw``
    Notes
    -----
    Refer to [1]_ for the theoretical and implementation details.
    .. [1] Andersson, F and Robertsson J. "Fast :math:`\tau-p` transforms by
       chirp modulation", Geophysics, vol 84, NO.1, pp. A13-A17, 2019.
    """
    def __init__(
        self,
        taxis: NDArray,
        hyaxis: NDArray,
        hxaxis: NDArray,
        pmax: NDArray,
        engine: str = "numpy",
        dtype: DTypeLike = "float64",
        name: str = "C",
        **kwargs_fftw,
    ):
        dims = len(hyaxis), len(hxaxis), len(taxis)
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name)
        self.ny, self.nx, self.nt = self.dims
        # sampling intervals (axes assumed regularly sampled)
        self.dt = taxis[1] - taxis[0]
        self.dy = hyaxis[1] - hyaxis[0]
        self.dx = hxaxis[1] - hxaxis[0]
        self.pmax = pmax
        self.engine = engine
        if self.engine not in ["fftw", "numpy"]:
            raise NotImplementedError("engine must be 'numpy' or 'fftw'")
        # bug fix: when pyfftw fails to import, the module-level name
        # ``pyfftw`` is never bound and engine='fftw' would raise a
        # NameError at apply time; fall back to numpy with a warning instead
        if self.engine == "fftw" and pyfftw_message is not None:
            logging.warning(pyfftw_message)
            self.engine = "numpy"
        self.kwargs_fftw = kwargs_fftw
    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # forward transform (see [1] in class docstring)
        if self.engine == "fftw" and pyfftw_message is None:
            return _chirp_radon_3d_fftw(
                x, self.dt, self.dy, self.dx, self.pmax, mode="f", **self.kwargs_fftw
            )
        return _chirp_radon_3d(x, self.dt, self.dy, self.dx, self.pmax, mode="f")
    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        # adjoint transform
        if self.engine == "fftw" and pyfftw_message is None:
            return _chirp_radon_3d_fftw(
                x, self.dt, self.dy, self.dx, self.pmax, mode="a", **self.kwargs_fftw
            )
        return _chirp_radon_3d(x, self.dt, self.dy, self.dx, self.pmax, mode="a")
    def inverse(self, x: NDArray) -> NDArray:
        """Apply the direct inverse transform to a (raveled) data vector."""
        x = x.reshape(self.dimsd)
        if self.engine == "fftw" and pyfftw_message is None:
            y = _chirp_radon_3d_fftw(
                x, self.dt, self.dy, self.dx, self.pmax, mode="i", **self.kwargs_fftw
            )
        else:
            y = _chirp_radon_3d(x, self.dt, self.dy, self.dx, self.pmax, mode="i")
        return y.ravel()
| 4,439 | 33.96063 | 119 | py |
pylops | pylops-master/pylops/signalprocessing/convolve2d.py | __all__ = ["Convolve2D"]
from typing import Union
from pylops.signalprocessing import ConvolveND
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray
class Convolve2D(ConvolveND):
    r"""2D convolution operator.
    Apply two-dimensional convolution with a compact filter to model
    (and data) along a pair of ``axes`` of a two or
    three-dimensional array.
    Parameters
    ----------
    dims : :obj:`list` or :obj:`int`
        Number of samples for each dimension
    h : :obj:`numpy.ndarray`
        2d compact filter to be convolved to input signal
    offset : :obj:`tuple`, optional
        Indices of the center of the compact filter
    axes : :obj:`int`, optional
        .. versionadded:: 2.0.0
        Axes along which convolution is applied
    method : :obj:`str`, optional
        Method used to calculate the convolution (``direct`` or ``fft``).
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    Raises
    ------
    ValueError
        If ``h`` is not a 2-dimensional array.
    Notes
    -----
    The Convolve2D operator applies two-dimensional convolution
    between the input signal :math:`d(t,x)` and a compact filter kernel
    :math:`h(t,x)` in forward model:
    .. math::
        y(t,x) = \iint\limits_{-\infty}^{\infty}
        h(t-\tau,x-\chi) d(\tau,\chi) \,\mathrm{d}\tau \,\mathrm{d}\chi
    This operation can be discretized as follows
    .. math::
        y[i,n] = \sum_{j=-\infty}^{\infty} \sum_{m=-\infty}^{\infty} h[i-j,n-m] d[j,m]
    as well as performed in the frequency domain.
    .. math::
        Y(f, k_x) = \mathscr{F} (h(t,x)) * \mathscr{F} (d(t,x))
    Convolve2D operator uses :py:func:`scipy.signal.convolve2d`
    that automatically chooses the best domain for the operation
    to be carried out.
    As the adjoint of convolution is correlation, Convolve2D operator
    applies correlation in the adjoint mode.
    In time domain:
    .. math::
        y(t,x) = \iint\limits_{-\infty}^{\infty}
        h(t+\tau,x+\chi) d(\tau,\chi) \,\mathrm{d}\tau \,\mathrm{d}\chi
    or in frequency domain:
    .. math::
        y(t, x) = \mathscr{F}^{-1} (H(f, k_x)^* * X(f, k_x))
    """
    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        h: NDArray,
        offset: InputDimsLike = (0, 0),
        axes: InputDimsLike = (-2, -1),
        method: str = "fft",
        dtype: DTypeLike = "float64",
        name: str = "C",
    ):
        # a 2D convolution only makes sense for a 2D filter; reject early
        if h.ndim != 2:
            raise ValueError("h must be 2-dimensional")
        # everything else is handled by the generic ND implementation
        super().__init__(
            dims=dims,
            h=h,
            offset=offset,
            axes=axes,
            method=method,
            dtype=dtype,
            name=name,
        )
| 2,753 | 30.655172 | 105 | py |
pylops | pylops-master/pylops/signalprocessing/radon3d.py | __all__ = ["Radon3D"]
import logging
from typing import Callable, Optional, Tuple
import numpy as np
from pylops.basicoperators import Spread
from pylops.utils import deps
from pylops.utils.typing import DTypeLike, NDArray
jit_message = deps.numba_import("the radon3d module")
if jit_message is None:
from numba import jit
from ._radon3d_numba import (
_create_table_numba,
_hyperbolic_numba,
_indices_3d_onthefly_numba,
_linear_numba,
_parabolic_numba,
)
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
def _linear(
y: NDArray,
x: NDArray,
t: int,
py: float,
px: float,
) -> NDArray:
return t + px * x + py * y
def _parabolic(
y: NDArray,
x: NDArray,
t: int,
py: float,
px: float,
) -> NDArray:
return t + px * x**2 + py * y**2
def _hyperbolic(
y: NDArray,
x: NDArray,
t: int,
py: float,
px: float,
) -> NDArray:
return np.sqrt(t**2 + (x / px) ** 2 + (y / py) ** 2)
def _indices_3d(
f: Callable,
y: NDArray,
x: NDArray,
py: float,
px: float,
t: int,
nt: int,
interp: bool = True,
) -> Tuple[NDArray, NDArray, Optional[NDArray]]:
"""Compute time and space indices of parametric line in ``f`` function
Parameters
----------
f : :obj:`func`
Function computing values of parametric line for stacking
y : :obj:`np.ndarray`
Slow spatial axis (must be symmetrical around 0 and with sampling 1)
x : :obj:`np.ndarray`
Fast spatial axis (must be symmetrical around 0 and with sampling 1)
py : :obj:`float`
Slowness/curvature in slow axis
px : :obj:`float`
Slowness/curvature in fast axis
t : :obj:`int`
Time sample (time axis is assumed to have sampling 1)
nt : :obj:`int`
Size scaof time axis
interp : :obj:`bool`, optional
Apply linear interpolation (``True``) or nearest interpolation
(``False``) during stacking/spreading along parametric curve
Returns
-------
sscan : :obj:`np.ndarray`
Spatial indices
tscan : :obj:`np.ndarray`
Time indices
dtscan : :obj:`np.ndarray`
Decimal time variations for interpolation
"""
tdecscan = f(y, x, t, py, px)
if not interp:
sscan = (tdecscan >= 0) & (tdecscan < nt)
else:
sscan = (tdecscan >= 0) & (tdecscan < nt - 1)
tscan = tdecscan[sscan].astype(int)
if interp:
dtscan = tdecscan[sscan] - tscan
else:
dtscan = None
return sscan, tscan, dtscan
def _indices_3d_onthefly(
    f: Callable,
    y: NDArray,
    x: NDArray,
    py: NDArray,
    px: NDArray,
    ip: int,
    it: int,
    nt: int,
    interp: bool = True,
) -> Tuple[NDArray, NDArray, Optional[NDArray]]:
    """On-the-fly wrapper around ``_indices_3d`` for the ``ip``-th parameter pair.

    Returns full-length (NaN-padded) time-index and interpolation-weight
    arrays so that they can be consumed directly by the Spread operator.
    """
    nspat = len(y)
    # NaN marks spatial positions whose curve time falls outside the time axis
    tscan = np.full(nspat, np.nan, dtype=np.float32)
    dtscan = np.full(nspat, np.nan) if interp else None
    sscan, tsel, dtsel = _indices_3d(f, y, x, py[ip], px[ip], it, nt, interp=interp)
    tscan[sscan] = tsel
    if interp:
        dtscan[sscan] = dtsel
    return sscan, tscan, dtscan
def _create_table(
    f: Callable,
    y: NDArray,
    x: NDArray,
    pyaxis: NDArray,
    pxaxis: NDArray,
    nt: int,
    npy: int,
    npx: int,
    ny: int,
    nx: int,
    interp: bool,
) -> Tuple[NDArray, Optional[NDArray]]:
    """Precompute the look-up table of stacking indices (and, when
    ``interp=True``, the fractional shifts) for every parameter pair and
    time sample."""
    npar, nspat = npx * npy, ny * nx
    table = np.full((npar, nt, nspat), np.nan, dtype=np.float32)
    dtable = np.full((npar, nt, nspat), np.nan) if interp else None
    for ip, (py, px) in enumerate(zip(pyaxis, pxaxis)):
        for it in range(nt):
            sscan, tscan, dtscan = _indices_3d(f, y, x, py, px, it, nt, interp=interp)
            table[ip, it, sscan] = tscan
            if interp:
                dtable[ip, it, sscan] = dtscan
    return table, dtable
def Radon3D(
    taxis: NDArray,
    hyaxis: NDArray,
    hxaxis: NDArray,
    pyaxis: NDArray,
    pxaxis: NDArray,
    kind: str = "linear",
    centeredh: bool = True,
    interp: bool = True,
    onthefly: bool = False,
    engine: str = "numpy",
    dtype: DTypeLike = "float64",
    name: str = "R",
):
    r"""Three dimensional Radon transform.

    Apply three dimensional Radon forward (and adjoint) transform to a
    3-dimensional array of size :math:`[n_{p_y} \times n_{p_x} \times n_t]`
    (and :math:`[n_y \times n_x \times n_t]`).

    In forward mode this entails to spreading the model vector
    along parametric curves (lines, parabolas, or hyperbolas depending on the
    choice of ``kind``), while stacking values in the data vector
    along the same parametric curves is performed in adjoint mode.

    Parameters
    ----------
    taxis : :obj:`np.ndarray`
        Time axis
    hxaxis : :obj:`np.ndarray`
        Fast spatial axis
    hyaxis : :obj:`np.ndarray`
        Slow spatial axis
    pyaxis : :obj:`np.ndarray`
        Axis of scanning variable :math:`p_y` of parametric curve
    pxaxis : :obj:`np.ndarray`
        Axis of scanning variable :math:`p_x` of parametric curve
    kind : :obj:`str`, optional
        Curve to be used for stacking/spreading (``linear``, ``parabolic``,
        and ``hyperbolic`` are currently supported)
    centeredh : :obj:`bool`, optional
        Assume centered spatial axis (``True``) or not (``False``). If ``True``
        the original ``haxis`` is ignored and a new axis is created.
    interp : :obj:`bool`, optional
        Apply linear interpolation (``True``) or nearest interpolation
        (``False``) during stacking/spreading along parametric curve
    onthefly : :obj:`bool`, optional
        Compute stacking parametric curves on-the-fly as part of forward
        and adjoint modelling (``True``) or at initialization and store them
        in look-up table (``False``). Using a look-up table is computationally
        more efficient but increases the memory burden
    engine : :obj:`str`, optional
        Engine used for computation (``numpy`` or ``numba``)
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Returns
    -------
    r3op : :obj:`pylops.LinearOperator`
        Radon operator

    Raises
    ------
    KeyError
        If ``engine`` is neither ``numpy`` nor ``numba``
    NotImplementedError
        If ``kind`` is not ``linear``, ``parabolic``, or ``hyperbolic``

    See Also
    --------
    pylops.signalprocessing.Radon2D: Two dimensional Radon transform
    pylops.Spread: Spread operator

    Notes
    -----
    The Radon3D operator applies the following linear transform in adjoint mode
    to the data after reshaping it into a 3-dimensional array of
    size :math:`[n_y \times n_x \times n_t]` in adjoint mode:

    .. math::
        m(p_y, p_x, t_0) = \int{d(y, x, t = f(p_y, p_x, y, x, t))} \,\mathrm{d}x \,\mathrm{d}y

    where :math:`f(p_y, p_x, y, x, t) = t_0 + p_y y + p_x x` in linear
    mode, :math:`f(p_y, p_x, y, x, t) = t_0 + p_y y^2 + p_x x^2` in
    parabolic mode, and
    :math:`f(p_y, p_x, y, x, t) = \sqrt{t_0^2 + y^2 / p_y^2 + x^2 / p_x^2}`
    in hyperbolic mode. Note that internally the :math:`p_x` and :math:`p_y`
    axes will be normalized by the ratio of the spatial and time axes and
    used alongside unitless axes. Whilst this makes the linear mode fully
    unitless, users are required to apply additional scalings to the :math:`p_x`
    axis for other relationships:

    - :math:`p_x` should be pre-multipled by :math:`d_x / d_y` for the parabolic
      relationship;
    - :math:`p_x` should be pre-multipled by :math:`(d_t/d_x)^2 / (d_t/d_y)^2`
      for the hyperbolic relationship.

    As the adjoint operator can be interpreted as a repeated summation of sets
    of elements of the model vector along chosen parametric curves, the
    forward is implemented as spreading of values in the data vector along the
    same parametric curves. This operator is actually a thin wrapper around
    the :class:`pylops.Spread` operator.
    """
    # engine
    if engine not in ["numpy", "numba"]:
        raise KeyError("engine must be numpy or numba")
    # fall back to numpy silently when numba was requested but is unavailable
    if engine == "numba" and jit_message is not None:
        engine = "numpy"
    # axes
    nt, nhy, nhx = taxis.size, hyaxis.size, hxaxis.size
    npy, npx = pyaxis.size, pxaxis.size
    # select the parametric-curve function matching kind and engine
    if kind == "linear":
        f = _linear if engine == "numpy" else _linear_numba
    elif kind == "parabolic":
        f = _parabolic if engine == "numpy" else _parabolic_numba
    elif kind == "hyperbolic":
        f = _hyperbolic if engine == "numpy" else _hyperbolic_numba
    else:
        raise NotImplementedError("kind must be linear, " "parabolic, or hyperbolic...")
    # make axes unitless (slownesses are rescaled so that the internal axes
    # can use unit sampling; see Notes in the docstring)
    dhy, dhx = np.abs(hyaxis[1] - hyaxis[0]), np.abs(hxaxis[1] - hxaxis[0])
    dt = np.abs(taxis[1] - taxis[0])
    dpy = dhy / dt
    pyaxis = pyaxis * dpy
    dpx = dhx / dt
    pxaxis = pxaxis * dpx
    if not centeredh:
        hyaxisunitless = hyaxis / dhy
        hxaxisunitless = hxaxis / dhx
    else:
        # symmetric unitless axis centered on zero (half-sample shift for even sizes)
        hyaxisunitless = np.arange(nhy) - nhy // 2 + ((nhy + 1) % 2) / 2
        hxaxisunitless = np.arange(nhx) - nhx // 2 + ((nhx + 1) % 2) / 2
    # create grid for py and px axis
    hyaxisunitless, hxaxisunitless = np.meshgrid(
        hyaxisunitless, hxaxisunitless, indexing="ij"
    )
    pyaxis, pxaxis = np.meshgrid(pyaxis, pxaxis, indexing="ij")
    dims = (npy * npx, nt)
    dimsd = (nhy * nhx, nt)
    if onthefly:
        # curves are computed on demand inside Spread via a closure
        if engine == "numba":

            @jit(nopython=True, nogil=True)
            def ontheflyfunc(x, y):
                return _indices_3d_onthefly_numba(
                    f,
                    hyaxisunitless.ravel(),
                    hxaxisunitless.ravel(),
                    pyaxis.ravel(),
                    pxaxis.ravel(),
                    x,
                    y,
                    nt,
                    interp=interp,
                )[1:]

        else:
            if interp:

                def ontheflyfunc(x, y):
                    return _indices_3d_onthefly(
                        f,
                        hyaxisunitless.ravel(),
                        hxaxisunitless.ravel(),
                        pyaxis.ravel(),
                        pxaxis.ravel(),
                        x,
                        y,
                        nt,
                        interp=interp,
                    )[1:]

            else:
                # without interpolation only the time indices (element [1]) are needed
                def ontheflyfunc(x, y):
                    return _indices_3d_onthefly(
                        f,
                        hyaxisunitless.ravel(),
                        hxaxisunitless.ravel(),
                        pyaxis.ravel(),
                        pxaxis.ravel(),
                        x,
                        y,
                        nt,
                        interp=interp,
                    )[1]

        r3op = Spread(
            dims, dimsd, fh=ontheflyfunc, interp=interp, engine=engine, dtype=dtype
        )
    else:
        # precompute all curves into a look-up table (faster, more memory)
        if engine == "numba":
            tablefunc = _create_table_numba
        else:
            tablefunc = _create_table
        table, dtable = tablefunc(
            f,
            hyaxisunitless.ravel(),
            hxaxisunitless.ravel(),
            pyaxis.ravel(),
            pxaxis.ravel(),
            nt,
            npy,
            npx,
            nhy,
            nhx,
            interp=interp,
        )
        if not interp:
            dtable = None
        r3op = Spread(
            dims,
            dimsd,
            table=table,
            dtable=dtable,
            interp=interp,
            engine=engine,
            dtype=dtype,
        )
    r3op.name = name
    return r3op
| 11,959 | 29.35533 | 94 | py |
pylops | pylops-master/pylops/signalprocessing/fftnd.py | __all__ = ["FFTND"]
import logging
import warnings
from typing import Optional, Sequence, Union
import numpy as np
import numpy.typing as npt
from pylops.signalprocessing._baseffts import _BaseFFTND, _FFTNorms
from pylops.utils.backend import get_sp_fft
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
class _FFTND_numpy(_BaseFFTND):
    """N-dimensional Fast-Fourier Transform using NumPy"""

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        axes: Union[int, InputDimsLike] = (-3, -2, -1),
        nffts: Optional[Union[int, InputDimsLike]] = None,
        sampling: Union[float, Sequence[float]] = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ) -> None:
        super().__init__(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
        # numpy's FFT always computes in double precision; warn and cast later
        if self.cdtype != np.complex128:
            warnings.warn(
                f"numpy backend always returns complex128 dtype. To respect the passed dtype, data will be cast to {self.cdtype}."
            )
        self._norm_kwargs = {"norm": None}  # equivalent to "backward" in Numpy/Scipy
        if self.norm is _FFTNorms.ORTHO:
            self._norm_kwargs["norm"] = "ortho"
        elif self.norm is _FFTNorms.NONE:
            # undo numpy's default 1/N scaling of ifftn (applied in _rmatvec)
            self._scale = np.prod(self.nffts)
        elif self.norm is _FFTNorms.ONE_OVER_N:
            # extra 1/N applied in _matvec on top of the default "backward" norm
            self._scale = 1.0 / np.prod(self.nffts)

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # Forward: N-dimensional FFT (real-input FFT when self.real is set).
        if self.ifftshift_before.any():
            x = np.fft.ifftshift(x, axes=self.axes[self.ifftshift_before])
        if not self.clinear:
            x = np.real(x)
        if self.real:
            y = np.fft.rfftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
            # Apply scaling to obtain a correct adjoint for this operator
            # (sqrt(2) on all non-zero, non-Nyquist bins of the real axis)
            y = np.swapaxes(y, -1, self.axes[-1])
            y[..., 1 : 1 + (self.nffts[-1] - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axes[-1], -1)
        else:
            y = np.fft.fftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.ONE_OVER_N:
            y *= self._scale
        y = y.astype(self.cdtype)
        if self.fftshift_after.any():
            y = np.fft.fftshift(y, axes=self.axes[self.fftshift_after])
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        # Adjoint: inverse N-dimensional FFT, with the inverse of the forward's
        # real-axis scaling so that the operator pair passes the dot test.
        if self.fftshift_after.any():
            x = np.fft.ifftshift(x, axes=self.axes[self.fftshift_after])
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = x.copy()
            x = np.swapaxes(x, -1, self.axes[-1])
            x[..., 1 : 1 + (self.nffts[-1] - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axes[-1], -1)
            y = np.fft.irfftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        else:
            y = np.fft.ifftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.NONE:
            y *= self._scale
        # crop (or pad) back to the original model size along each FFT axis
        for ax, nfft in zip(self.axes, self.nffts):
            if nfft > self.dims[ax]:
                y = np.take(y, range(self.dims[ax]), axis=ax)
        if self.doifftpad:
            y = np.pad(y, self.ifftpad)
        if not self.clinear:
            y = np.real(y)
        y = y.astype(self.rdtype)
        if self.ifftshift_before.any():
            y = np.fft.fftshift(y, axes=self.axes[self.ifftshift_before])
        return y

    def __truediv__(self, y: npt.ArrayLike) -> npt.ArrayLike:
        # Exact inverse: for non-orthonormal norms the adjoint is not the
        # inverse, so remove the extra scaling.
        if self.norm is not _FFTNorms.ORTHO:
            return self._rmatvec(y) / self._scale
        return self._rmatvec(y)
class _FFTND_scipy(_BaseFFTND):
    """N-dimensional Fast-Fourier Transform using SciPy"""

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        axes: Union[int, InputDimsLike] = (-3, -2, -1),
        nffts: Optional[Union[int, InputDimsLike]] = None,
        sampling: Union[float, Sequence[float]] = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ) -> None:
        super().__init__(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
        self._norm_kwargs = {"norm": None}  # equivalent to "backward" in Numpy/Scipy
        if self.norm is _FFTNorms.ORTHO:
            self._norm_kwargs["norm"] = "ortho"
        elif self.norm is _FFTNorms.NONE:
            # undo scipy's default 1/N scaling of ifftn (applied in _rmatvec)
            self._scale = np.prod(self.nffts)
        elif self.norm is _FFTNorms.ONE_OVER_N:
            # extra 1/N applied in _matvec on top of the default "backward" norm
            self._scale = 1.0 / np.prod(self.nffts)

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # get_sp_fft dispatches to the scipy/cupy fft module matching x's backend
        sp_fft = get_sp_fft(x)
        if self.ifftshift_before.any():
            x = sp_fft.ifftshift(x, axes=self.axes[self.ifftshift_before])
        if not self.clinear:
            x = np.real(x)
        if self.real:
            y = sp_fft.rfftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
            # Apply scaling to obtain a correct adjoint for this operator
            # (sqrt(2) on all non-zero, non-Nyquist bins of the real axis)
            y = np.swapaxes(y, -1, self.axes[-1])
            y[..., 1 : 1 + (self.nffts[-1] - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axes[-1], -1)
        else:
            y = sp_fft.fftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.ONE_OVER_N:
            y *= self._scale
        if self.fftshift_after.any():
            y = sp_fft.fftshift(y, axes=self.axes[self.fftshift_after])
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        sp_fft = get_sp_fft(x)
        if self.fftshift_after.any():
            x = sp_fft.ifftshift(x, axes=self.axes[self.fftshift_after])
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = x.copy()
            x = np.swapaxes(x, -1, self.axes[-1])
            x[..., 1 : 1 + (self.nffts[-1] - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axes[-1], -1)
            y = sp_fft.irfftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        else:
            y = sp_fft.ifftn(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.NONE:
            y *= self._scale
        # crop (or pad) back to the original model size along each FFT axis
        for ax, nfft in zip(self.axes, self.nffts):
            if nfft > self.dims[ax]:
                y = np.take(y, range(self.dims[ax]), axis=ax)
        if self.doifftpad:
            y = np.pad(y, self.ifftpad)
        if not self.clinear:
            y = np.real(y)
        if self.ifftshift_before.any():
            y = sp_fft.fftshift(y, axes=self.axes[self.ifftshift_before])
        return y

    def __truediv__(self, y: npt.ArrayLike) -> npt.ArrayLike:
        # Exact inverse: for non-orthonormal norms the adjoint is not the
        # inverse, so remove the extra scaling.
        if self.norm is not _FFTNorms.ORTHO:
            return self._rmatvec(y) / self._scale
        return self._rmatvec(y)
def FFTND(
    dims: Union[int, InputDimsLike],
    axes: Union[int, InputDimsLike] = (-3, -2, -1),
    nffts: Optional[Union[int, InputDimsLike]] = None,
    sampling: Union[float, Sequence[float]] = 1.0,
    norm: str = "ortho",
    real: bool = False,
    ifftshift_before: bool = False,
    fftshift_after: bool = False,
    engine: str = "scipy",
    dtype: DTypeLike = "complex128",
    name: str = "F",
):
    r"""N-dimensional Fast-Fourier Transform.

    Apply N-dimensional Fast-Fourier Transform (FFT) to any n ``axes``
    of a multi-dimensional array.

    Using the default NumPy engine, the FFT operator is an overload to either the NumPy
    :py:func:`numpy.fft.fftn` (or :py:func:`numpy.fft.rfftn` for real models) in
    forward mode, and to :py:func:`numpy.fft.ifftn` (or :py:func:`numpy.fft.irfftn`
    for real models) in adjoint mode, or their CuPy equivalents.
    Alternatively, when the SciPy engine is chosen, the overloads are of
    :py:func:`scipy.fft.fftn` (or :py:func:`scipy.fft.rfftn` for real models) in
    forward mode, and to :py:func:`scipy.fft.ifftn` (or :py:func:`scipy.fft.irfftn`
    for real models) in adjoint mode.

    When using ``real=True``, the result of the forward is also multiplied by
    :math:`\sqrt{2}` for all frequency bins except zero and Nyquist along the last
    ``axes``, and the input of the adjoint is multiplied by
    :math:`1 / \sqrt{2}` for the same frequencies.

    For a real valued input signal, it is advised to use the flag ``real=True``
    as it stores the values of the Fourier transform of the last axis in ``axes`` at positive
    frequencies only as values at negative frequencies are simply their complex conjugates.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension
    axes : :obj:`int`, optional
        .. versionadded:: 2.0.0

        Axes (or axis) along which FFTND is applied
    nffts : :obj:`tuple` or :obj:`int`, optional
        Number of samples in Fourier Transform for each axis in ``axes``. In case only one
        dimension needs to be specified, use ``None`` for the other dimension in the
        tuple. An axis with ``None`` will use ``dims[axis]`` as ``nfft``.
        When supplying a tuple, the length must agree with that
        of ``axes``. When a single value is passed, it will be used for all
        ``axes``. As such the default is equivalent to ``nffts=(None, ..., None)``.
    sampling : :obj:`tuple` or :obj:`float`, optional
        Sampling steps for each direction. When supplied a single value, it is used
        for all directions. Unlike ``nffts``, any ``None`` will not be converted to the
        default value.
    norm : `{"ortho", "none", "1/n"}`, optional
        .. versionadded:: 1.17.0

        - "ortho": Scales forward and adjoint FFT transforms with :math:`1/\sqrt{N_F}`,
          where :math:`N_F` is the number of samples in the Fourier domain given by
          product of all elements of ``nffts``.

        - "none": Does not scale the forward or the adjoint FFT transforms.

        - "1/n": Scales both the forward and adjoint FFT transforms by
          :math:`1/N_F`.

        .. note:: For "none" and "1/n", the operator is not unitary, that is, the
          adjoint is not the inverse. To invert the operator, simply use ``Op \ y``.
    real : :obj:`bool`, optional
        Model to which fft is applied has real numbers (``True``) or not
        (``False``). Used to enforce that the output of adjoint of a real
        model is real. Note that the real FFT is applied only to the first
        dimension to which the FFTND operator is applied (last element of
        ``axes``)
    ifftshift_before : :obj:`tuple` or :obj:`bool`, optional
        .. versionadded:: 1.17.0

        Apply ifftshift (``True``) or not (``False``) to model vector (before FFT).
        Consider using this option when the model vector's respective axis is symmetric
        with respect to the zero value sample. This will shift the zero value sample to
        coincide with the zero index sample. With such an arrangement, FFT will not
        introduce a sample-dependent phase-shift when compared to the continuous Fourier
        Transform.
        When passing a single value, the shift will be the same for every direction. Pass
        a tuple to specify which dimensions are shifted.
    fftshift_after : :obj:`tuple` or :obj:`bool`, optional
        .. versionadded:: 1.17.0

        Apply fftshift (``True``) or not (``False``) to data vector (after FFT).
        Consider using this option when you require frequencies to be arranged
        naturally, from negative to positive. When not applying fftshift after FFT,
        frequencies are arranged from zero to largest positive, and then from negative
        Nyquist to the frequency bin before zero.
        When passing a single value, the shift will be the same for every direction. Pass
        a tuple to specify which dimensions are shifted.
    engine : :obj:`str`, optional
        .. versionadded:: 1.17.0

        Engine used for fft computation (``numpy`` or ``scipy``).
    dtype : :obj:`str`, optional
        Type of elements in input array. Note that the ``dtype`` of the operator
        is the corresponding complex type even when a real type is provided.
        In addition, note that the NumPy backend does not support returning ``dtype``
        different than ``complex128``. As such, when using the NumPy backend, arrays will
        be force-cast to types corresponding to the supplied ``dtype``.
        The SciPy backend supports all precisions natively.
        Under both backends, when a real ``dtype`` is supplied, a real result will be
        enforced on the result of the ``rmatvec`` and the input of the ``matvec``.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    dimsd : :obj:`tuple`
        Shape of the array after the forward, but before linearization.
        For example, ``y_reshaped = (Op * x.ravel()).reshape(Op.dimsd)``.
    fs : :obj:`tuple`
        Each element of the tuple corresponds to the Discrete Fourier Transform
        sample frequencies along the respective direction given by ``axes``.
    real : :obj:`bool`
        When ``True``, uses ``rfftn``/``irfftn``
    rdtype : :obj:`bool`
        Expected input type to the forward
    cdtype : :obj:`bool`
        Output type of the forward. Complex equivalent to ``rdtype``.
    shape : :obj:`tuple`
        Operator shape
    clinear : :obj:`bool`
        .. versionadded:: 1.17.0

        Operator is complex-linear. Is false when either ``real=True`` or when
        ``dtype`` is not a complex type.
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly
        (``True``) or not (``False``)

    See Also
    --------
    FFT: One-dimensional FFT
    FFT2D: Two-dimensional FFT

    Raises
    ------
    ValueError
        - If ``nffts`` or ``sampling`` are not either a single value or tuple with
          the same dimension ``axes``.
        - If ``norm`` is not one of "ortho", "none", or "1/n".
    NotImplementedError
        If ``engine`` is neither ``numpy``, nor ``scipy``.

    Notes
    -----
    The FFTND operator (using ``norm="ortho"``) applies the N-dimensional forward
    Fourier transform to a multi-dimensional array. Considering an N-dimensional
    signal :math:`d(x_1, \ldots, x_N)`. The FFTND in forward mode is:

    .. math::
        D(k_1, \ldots, k_N) = \mathscr{F} (d) = \frac{1}{\sqrt{N_F}}
        \int\limits_{-\infty}^\infty \cdots \int\limits_{-\infty}^\infty
        d(x_1, \ldots, x_N)
        e^{-j2\pi k_1 x_1} \cdots
        e^{-j 2 \pi k_N x_N} \,\mathrm{d}x_1 \cdots \mathrm{d}x_N

    Similarly, the three-dimensional inverse Fourier transform is applied to
    the Fourier spectrum :math:`D(k_z, k_y, k_x)` in adjoint mode:

    .. math::
        d(x_1, \ldots, x_N) = \mathscr{F}^{-1} (D) = \frac{1}{\sqrt{N_F}}
        \int\limits_{-\infty}^\infty \cdots \int\limits_{-\infty}^\infty
        D(k_1, \ldots, k_N)
        e^{-j2\pi k_1 x_1} \cdots
        e^{-j 2 \pi k_N x_N} \,\mathrm{d}k_1 \cdots \mathrm{d}k_N

    where :math:`N_F` is the number of samples in the Fourier domain given by the
    product of the element of ``nffts``.
    Both operators are effectively discretized and solved by a fast iterative
    algorithm known as Fast Fourier Transform. Note that the FFTND operator
    (using ``norm="ortho"``) is a special operator in that the adjoint is also
    the inverse of the forward mode. For other norms, this does not hold (see ``norm``
    help). However, for any norm, the N-dimensional Fourier transform is Hermitian
    for real input signals.
    """
    # Dispatch to the backend-specific implementation; both share the same
    # interface and differ only in the fft library used internally.
    if engine == "numpy":
        f = _FFTND_numpy(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
    elif engine == "scipy":
        f = _FFTND_scipy(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
    else:
        raise NotImplementedError("engine must be numpy or scipy")
    f.name = name
    return f
| 17,149 | 40.225962 | 130 | py |
pylops | pylops-master/pylops/signalprocessing/dct.py | __all__ = ["DCT"]
from typing import List, Optional, Union
import numpy as np
from scipy import fft
from pylops import LinearOperator
from pylops.utils._internal import _value_or_sized_to_tuple
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray
class DCT(LinearOperator):
    r"""Discrete Cosine Transform.

    Apply 1D or ND-Cosine Transform along one or more ``axes`` of a multi-dimensional
    array of size ``dims``.

    This operator is an overload of :py:func:`scipy.fft.dctn` in forward mode and :py:func:`scipy.fft.idctn`
    in adjoint mode.

    Parameters
    ----------
    dims : :obj:`int` or :obj:`tuple`
        Number of samples for each dimension
    type : :obj:`int`, optional
        Type of DCT (see scipy's documentation for more details). Default type is 2.
    axes : :obj:`int` or :obj:`list`, optional
        Axes over which the DCT is computed. If ``None``, the transform is applied
        over all axes.
    workers : :obj:`int`, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from os.cpu_count().
    dtype : :obj:`DTypeLike`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly
        (``True``) or not (``False``)

    Raises
    ------
    ValueError
        If ``type`` is different from 1, 2, 3, or 4.

    Notes
    -----
    The DCT operator applies the Discrete Cosine Transform in forward mode and the Inverse Discrete Cosine Transform
    in adjoint mode. This transform expresses a signal as a sum of cosine functions oscillating at different
    frequencies. By doing so, no information is lost and the energy is compacted into the top left corner of the
    transform. When applied to multi-dimensional arrays, the DCT operator is simply a cascade of one-dimensional DCT
    operators acting along the different axes.

    Finally, note that the DCT operator is implemented with normalization mode ``norm="ortho"`` to ensure symmetric
    scaling.
    """

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        type: int = 2,  # shadows the builtin ``type``; kept for API compatibility
        axes: Optional[Union[int, List[int]]] = None,
        dtype: DTypeLike = "float64",
        workers: Optional[int] = None,
        name: str = "C",
    ) -> None:
        # scipy.fft only defines DCT types 1 through 4
        if type > 4 or type < 1:
            raise ValueError("wrong type value, it can only be 1, 2, 3 or 4")
        self.type = type
        self.axes = axes
        self.workers = workers
        self.dims = _value_or_sized_to_tuple(dims)
        # model and data share the same shape (the transform preserves size)
        super().__init__(
            dtype=np.dtype(dtype), dims=self.dims, dimsd=self.dims, name=name
        )

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # norm="ortho" makes the transform unitary so the adjoint is the inverse
        return fft.dctn(
            x, axes=self.axes, type=self.type, norm="ortho", workers=self.workers
        )

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        return fft.idctn(
            x, axes=self.axes, type=self.type, norm="ortho", workers=self.workers
        )
| 3,296 | 32.989691 | 116 | py |
pylops | pylops-master/pylops/signalprocessing/sliding2d.py | __all__ = [
"sliding2d_design",
"Sliding2D",
]
import logging
from typing import Tuple
import numpy as np
from pylops import LinearOperator
from pylops.basicoperators import BlockDiag, Diagonal, HStack, Restriction
from pylops.utils.tapers import taper2d
from pylops.utils.typing import InputDimsLike, NDArray
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)
def _slidingsteps(
ntr: int,
nwin: int,
nover: int,
) -> Tuple[NDArray, NDArray]:
"""Identify sliding window initial and end points given overall
trace length, window length and overlap
Parameters
----------
ntr : :obj:`int`
Number of samples in trace
nwin : :obj:`int`
Number of samples of window
nover : :obj:`int`
Number of samples of overlapping part of window
Returns
-------
starts : :obj:`np.ndarray`
Start indices
ends : :obj:`np.ndarray`
End indices
"""
if nwin > ntr:
raise ValueError(f"nwin={nwin} is bigger than ntr={ntr}...")
step = nwin - nover
starts = np.arange(0, ntr - nwin + 1, step, dtype=int)
ends = starts + nwin
return starts, ends
def sliding2d_design(
    dimsd: Tuple[int, int],
    nwin: int,
    nover: int,
    nop: Tuple[int, int],
) -> Tuple[int, Tuple[int, int], Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]]:
    """Design Sliding2D operator.

    Helper to be run prior to creating a
    :class:`pylops.signalprocessing.Sliding2D` operator: given the data shape
    (``dimsd``), the window length (``nwin``), the overlap (``nover``) and the
    model-space size of the operator acting in each window (``nop``), it
    returns the number of windows, the required model shape and the start/end
    indices of every model and data patch.

    Parameters
    ----------
    dimsd : :obj:`tuple`
        Shape of 2-dimensional data.
    nwin : :obj:`int`
        Number of samples of window.
    nover : :obj:`int`
        Number of samples of overlapping part of window.
    nop : :obj:`tuple`
        Size of model in the transformed domain.

    Returns
    -------
    nwins : :obj:`int`
        Number of windows.
    dims : :obj:`tuple`
        Size of 2-dimensional model.
    mwins_inends : :obj:`tuple`
        Start and end indices for model patches (stored as tuple of tuples).
    dwins_inends : :obj:`tuple`
        Start and end indices for data patches (stored as tuple of tuples).
    """
    # windows in the data space (overlapping along the first dimension)
    dwin_ins, dwin_ends = _slidingsteps(dimsd[0], nwin, nover)
    nwins = len(dwin_ins)
    # windows in the model space (non-overlapping, one per data window)
    dims = (nwins * nop[0], nop[1])
    mwin_ins, mwin_ends = _slidingsteps(dims[0], nop[0], 0)
    # report the patching layout to the user
    logging.warning("%d windows required...", nwins)
    logging.warning(
        "data wins - start:%s, end:%s",
        dwin_ins,
        dwin_ends,
    )
    logging.warning(
        "model wins - start:%s, end:%s",
        mwin_ins,
        mwin_ends,
    )
    return nwins, dims, (mwin_ins, mwin_ends), (dwin_ins, dwin_ends)
def Sliding2D(
    Op: LinearOperator,
    dims: InputDimsLike,
    dimsd: InputDimsLike,
    nwin: int,
    nover: int,
    tapertype: str = "hanning",
    name: str = "S",
) -> LinearOperator:
    """2D Sliding transform operator.

    Apply a transform operator ``Op`` repeatedly to slices of the model
    vector in forward mode and slices of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into
    slices, each slice is transformed, and slices are then recombined in a
    sliding window fashion. Both model and data are internally reshaped and
    interpreted as 2-dimensional arrays: each slice contains a portion
    of the array in the first dimension (and the entire second dimension).

    This operator can be used to perform local, overlapping transforms (e.g.,
    :obj:`pylops.signalprocessing.FFT2D`
    or :obj:`pylops.signalprocessing.Radon2D`) on 2-dimensional arrays.

    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to first run ``sliding2d_design`` to obtain
       the corresponding ``dims`` and number of windows.

    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire data.
       The start and end indices of each window will be displayed and returned
       with running ``sliding2d_design``.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dims : :obj:`tuple`
        Shape of 2-dimensional model. Note that ``dims[0]`` should be multiple
        of the model size of the transform in the first dimension
    dimsd : :obj:`tuple`
        Shape of 2-dimensional data
    nwin : :obj:`int`
        Number of samples of window
    nover : :obj:`int`
        Number of samples of overlapping part of window
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator

    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).

    """
    # data windows: start/end indices along the first (windowed) axis
    dwin_ins, dwin_ends = _slidingsteps(dimsd[0], nwin, nover)
    nwins = len(dwin_ins)
    # check patching: the model's first dimension must hold exactly one
    # transform-sized block per window
    if nwins * Op.shape[1] // dims[1] != dims[0]:
        raise ValueError(
            f"Model shape (dims={dims}) is not consistent with chosen "
            f"number of windows. Run sliding2d_design to identify the "
            f"correct number of windows for the current "
            "model size..."
        )
    # create tapers: interior windows get the full taper; the first window
    # keeps its leading edge untapered and the last its trailing edge, so
    # data boundaries are not attenuated
    if tapertype is not None:
        tap = taper2d(dimsd[1], nwin, nover, tapertype=tapertype)
        tapin = tap.copy()
        tapin[:nover] = 1
        tapend = tap.copy()
        # NOTE(review): when nover == 0, `tapend[-nover:]` is `tapend[0:]`
        # and sets the whole taper to ones — confirm this is intended.
        tapend[-nover:] = 1
        taps = {}
        taps[0] = tapin
        for i in range(1, nwins - 1):
            taps[i] = tap
        taps[nwins - 1] = tapend  # if nwins == 1 this overwrites taps[0]
    # transform to apply: one copy of Op per window, each pre-multiplied by
    # its taper when tapering is enabled
    if tapertype is None:
        OOp = BlockDiag([Op for _ in range(nwins)])
    else:
        OOp = BlockDiag([Diagonal(taps[itap].ravel()) * Op for itap in range(nwins)])
    # adjoint of Restriction scatters each transformed window back into its
    # position in the data; overlapping contributions are summed by HStack
    combining = HStack(
        [
            Restriction(dimsd, range(win_in, win_end), axis=0, dtype=Op.dtype).H
            for win_in, win_end in zip(dwin_ins, dwin_ends)
        ]
    )
    Sop = LinearOperator(combining * OOp)
    # expose the logical 3D model shape (window, in-window sample, second dim)
    Sop.dims, Sop.dimsd = (nwins, int(dims[0] // nwins), dims[1]), dimsd
    Sop.name = name
    return Sop
| 6,953 | 30.466063 | 113 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/sotabench.py | import os
import numpy as np
import PIL
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import ImageNet
from efficientnet_pytorch import EfficientNet
from sotabencheval.image_classification import ImageNetEvaluator
from sotabencheval.utils import is_server
# Resolve the ImageNet location: on the sotabench server fall back to a
# default path; locally require IMAGENET_DIR to be set.
if is_server():
    # was `DATA_ROOT = DATA_ROOT = ...` — redundant double assignment removed
    DATA_ROOT = os.environ.get('IMAGENET_DIR', './imagenet')  # './.data/vision/imagenet'
else:  # local settings
    DATA_ROOT = os.environ['IMAGENET_DIR']
    assert bool(DATA_ROOT), 'please set IMAGENET_DIR environment variable'
    print('Local data root: ', DATA_ROOT)
# Model under evaluation; pretrained weights are downloaded on first use.
model_name = 'EfficientNet-B5'
model = EfficientNet.from_pretrained(model_name.lower())
image_size = EfficientNet.get_image_size(model_name.lower())  # native eval resolution
# Standard ImageNet eval preprocessing at the model's native resolution.
input_transform = transforms.Compose([
    transforms.Resize(image_size, PIL.Image.BICUBIC),
    transforms.CenterCrop(image_size),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
test_dataset = ImageNet(
    DATA_ROOT,
    split="val",
    transform=input_transform,
    target_transform=None,
)
test_loader = DataLoader(
    test_dataset,
    batch_size=128,
    shuffle=False,  # keep dataset order so batch index i maps onto dataset.imgs slices
    num_workers=4,
    pin_memory=True,
)
model = model.cuda()
model.eval()
# sotabench evaluator; links the run to the EfficientNet paper's reported results.
evaluator = ImageNetEvaluator(model_name=model_name,
                              paper_arxiv_id='1905.11946')
def get_img_id(image_name):
    """Return the ImageNet image id: the filename without directory or '.JPEG'."""
    filename = image_name.rpartition('/')[2]
    return filename.replace('.JPEG', '')
with torch.no_grad():  # inference only; no autograd bookkeeping needed
    for i, (input, target) in enumerate(test_loader):
        input = input.to(device='cuda', non_blocking=True)
        target = target.to(device='cuda', non_blocking=True)
        output = model(input)
        # Recover the image ids for this batch from the (unshuffled) dataset order.
        image_ids = [get_img_id(img[0]) for img in test_loader.dataset.imgs[i*test_loader.batch_size:(i+1)*test_loader.batch_size]]
        evaluator.add(dict(zip(image_ids, list(output.cpu().numpy()))))
        # Stop early once the evaluator reports a cached complete run.
        if evaluator.cache_exists:
            break
if not is_server():
    print("Results:")
    print(evaluator.get_results())
evaluator.save()
| 2,094 | 28.097222 | 131 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
#   $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'efficientnet_pytorch'
DESCRIPTION = 'EfficientNet implemented in PyTorch.'
URL = 'https://github.com/lukemelas/EfficientNet-PyTorch'
EMAIL = 'lmelaskyriazi@college.harvard.edu'
AUTHOR = 'Luke'
REQUIRES_PYTHON = '>=3.5.0'
VERSION = '0.7.1'
# What packages are required for this module to be executed?
REQUIRED = [
    'torch'
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    # Fall back to the short description when README.md is absent.
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
# When VERSION is hard-coded above, the file read is skipped entirely.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Support setup.py upload.

    Custom setuptools command: builds sdist/wheel, uploads to PyPI via
    twine, then tags the release in git. Invoked as `python setup.py upload`.
    """

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        # Required by the Command interface; nothing to configure.
        pass

    def finalize_options(self):
        # Required by the Command interface; nothing to validate.
        pass

    def run(self):
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            # No previous build directory — nothing to remove.
            pass
        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine…')
        os.system('twine upload dist/*')
        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')
        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    # py_modules=['model'], # If your package is a single module, use this instead of 'packages'
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license='Apache',
    classifiers=[
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
    # $ setup.py publish support.
    # Registers the custom UploadCommand above as `python setup.py upload`.
    cmdclass={
        'upload': UploadCommand,
    },
)
| 3,543 | 27.580645 | 96 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/hubconf.py | from efficientnet_pytorch import EfficientNet as _EfficientNet
dependencies = ['torch']
def _create_model_fn(model_name):
    """Build a torch.hub entry-point function for the given EfficientNet variant."""

    def _model_fn(num_classes=1000, in_channels=3, pretrained='imagenet'):
        """Create Efficient Net.

        Described in detail here: https://arxiv.org/abs/1905.11946

        Args:
            num_classes (int, optional): Number of classes, default is 1000.
            in_channels (int, optional): Number of input channels, default
                is 3.
            pretrained (str, optional): One of [None, 'imagenet', 'advprop']
                If None, no pretrained model is loaded.
                If 'imagenet', models trained on imagenet dataset are loaded.
                If 'advprop', models trained using adversarial training called
                advprop are loaded. It is important to note that the
                preprocessing required for the advprop pretrained models is
                slightly different from normal ImageNet preprocessing
        """
        # hub entry points use underscores; the library uses dashes
        canonical_name = model_name.replace('_', '-')
        if pretrained is None:
            model = _EfficientNet.from_name(
                model_name=canonical_name,
                override_params={'num_classes': num_classes},
            )
            # from_name does not accept in_channels, so adjust the stem here
            model._change_in_channels(in_channels)
        else:
            model = _EfficientNet.from_pretrained(
                model_name=canonical_name,
                advprop=(pretrained == 'advprop'),
                num_classes=num_classes,
                in_channels=in_channels)
        return model

    return _model_fn
# Register efficientnet_b0 ... efficientnet_b8 as torch.hub entry points.
# At module scope locals() is the module namespace, so these become top-level names.
for model_name in ['efficientnet_b' + str(i) for i in range(9)]:
    locals()[model_name] = _create_model_fn(model_name)
| 1,709 | 37.863636 | 78 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/efficientnet_pytorch/utils.py | """utils.py - Helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
################################################################################
# Help functions for model architecture
################################################################################
# GlobalParams and BlockArgs: Two namedtuples
# Swish and MemoryEfficientSwish: Two implementations of the method
# round_filters and round_repeats:
# Functions to calculate params for scaling model width and depth ! ! !
# get_width_and_height_from_size and calculate_output_image_size
# drop_connect: A structural design
# get_same_padding_conv2d:
# Conv2dDynamicSamePadding
# Conv2dStaticSamePadding
# get_same_padding_maxPool2d:
# MaxPool2dDynamicSamePadding
# MaxPool2dStaticSamePadding
# It's an additional function, not used in EfficientNet,
# but can be used in other model (such as EfficientDet).
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
    'width_coefficient', 'depth_coefficient', 'image_size', 'dropout_rate',
    'num_classes', 'batch_norm_momentum', 'batch_norm_epsilon',
    'drop_connect_rate', 'depth_divisor', 'min_depth', 'include_top'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
    'num_repeat', 'kernel_size', 'stride', 'expand_ratio',
    'input_filters', 'output_filters', 'se_ratio', 'id_skip'])
# Set GlobalParams and BlockArgs's defaults
# Every field defaults to None, so partially-specified tuples can be built.
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
# Swish activation function
if hasattr(nn, 'SiLU'):
    # Swish is identical to SiLU; use the native implementation when available
    Swish = nn.SiLU
else:
    # For compatibility with old PyTorch versions
    class Swish(nn.Module):
        def forward(self, x):
            return x * torch.sigmoid(x)


# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
    # Custom autograd Function: saves only the input and recomputes sigmoid
    # in backward, instead of keeping intermediate activations alive.
    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        i = ctx.saved_tensors[0]
        sigmoid_i = torch.sigmoid(i)
        # d/di [i * sigmoid(i)] = sigmoid(i) * (1 + i * (1 - sigmoid(i)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))


class MemoryEfficientSwish(nn.Module):
    def forward(self, x):
        return SwishImplementation.apply(x)
def round_filters(filters, global_params):
    """Scale a channel count by the width multiplier and round it.

    The scaled value is rounded to the nearest multiple of
    ``global_params.depth_divisor``, never dropping below
    ``global_params.min_depth`` (or the divisor itself) and never more than
    10% below the scaled value, matching the official TensorFlow code.

    Args:
        filters (int): Filters number to be calculated.
        global_params (namedtuple): Global params of the model.

    Returns:
        int: New filters number after calculating.
    """
    width_mult = global_params.width_coefficient
    if not width_mult:
        # no width scaling requested
        return filters
    divisor = global_params.depth_divisor
    floor = global_params.min_depth or divisor
    scaled = filters * width_mult
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:  # prevent rounding down by more than 10%
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, global_params):
    """Scale a block's repeat count by the depth multiplier, rounding up.

    Args:
        repeats (int): num_repeat to be calculated.
        global_params (namedtuple): Global params of the model.

    Returns:
        int: New repeat number after calculating (unchanged when no
        depth_coefficient is set).
    """
    depth_mult = global_params.depth_coefficient
    if not depth_mult:
        return repeats
    # ceil, so depth scaling never removes a block entirely
    return int(math.ceil(depth_mult * repeats))
def drop_connect(inputs, p, training):
    """Randomly zero out whole samples of a batch (stochastic depth).

    Each sample is kept with probability ``1 - p``; kept samples are rescaled
    by ``1 / (1 - p)`` so the expected value is unchanged. A no-op outside of
    training.

    Args:
        inputs (tensor: BCWH): Input of this structure.
        p (float: 0.0~1.0): Probability of drop connection.
        training (bool): The running mode.

    Returns:
        output: Output after drop connection.
    """
    assert 0 <= p <= 1, 'p must be in range of [0,1]'

    if not training:
        return inputs

    keep_prob = 1 - p
    batch = inputs.shape[0]
    # one Bernoulli(keep_prob) draw per sample: floor(keep_prob + U[0,1)) is
    # 1 with probability keep_prob and 0 otherwise
    mask = torch.floor(
        keep_prob + torch.rand([batch, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    )
    return inputs / keep_prob * mask
def get_width_and_height_from_size(x):
    """Obtain height and width from x.

    Args:
        x (int, tuple or list): Data size. An int is interpreted as a square
            size; a list/tuple is returned unchanged.

    Returns:
        size: A tuple or list (H,W).

    Raises:
        TypeError: If ``x`` is not an int, list, or tuple.
    """
    if isinstance(x, int):
        return x, x
    # idiomatic single isinstance call (was two chained isinstance checks)
    if isinstance(x, (list, tuple)):
        return x
    # was a bare `raise TypeError()` with no message
    raise TypeError('size must be an int or a list/tuple, got {!r}'.format(type(x).__name__))
def calculate_output_image_size(input_image_size, stride):
    """Calculates the output image size when using Conv2dSamePadding with a stride.

    Necessary for static padding. Thanks to mannatsingh for pointing this out.

    Args:
        input_image_size (int, tuple or list): Size of input image.
        stride (int, tuple or list): Conv2d operation's stride.

    Returns:
        output_image_size: A list [H,W], or None when no input size is given.
    """
    if input_image_size is None:
        return None
    height, width = get_width_and_height_from_size(input_image_size)
    # 'SAME' padding yields ceil(input / stride) along each spatial axis
    step = stride if isinstance(stride, int) else stride[0]
    return [int(math.ceil(height / step)), int(math.ceil(width / step))]
# Note:
# The following 'SamePadding' functions make output size equal ceil(input size/stride).
# Only when stride equals 1, can the output size be the same as input size.
# Don't be confused by their function names ! ! !
def get_same_padding_conv2d(image_size=None):
    """Chooses static padding if you have specified an image size, and dynamic padding otherwise.

    Static padding is necessary for ONNX exporting of models.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        Conv2dDynamicSamePadding or Conv2dStaticSamePadding.
    """
    if image_size is None:
        return Conv2dDynamicSamePadding
    # bind the image size so callers can construct the class as usual
    return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
    """2D Convolutions like TensorFlow, for a dynamic image size.
    The padding is operated in forward function by calculating dynamically.
    """

    # Tips for 'SAME' mode padding.
    #     Given the following:
    #         i: width or height
    #         s: stride
    #         k: kernel size
    #         d: dilation
    #         p: padding
    #     Output after Conv2d:
    #         o = floor((i+p-((k-1)*d+1))/s+1)
    # If o equals i, i = floor((i+p-((k-1)*d+1))/s+1),
    # => p = (i-1)*s+((k-1)*d+1)-i
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # padding is forced to 0 here; the actual 'SAME' padding is applied in forward()
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2

    def forward(self, x):
        ih, iw = x.size()[-2:]
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)  # change the output size according to stride ! ! !
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # asymmetric padding (extra pixel on bottom/right), matching TensorFlow
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
    """2D Convolutions like TensorFlow's 'SAME' mode, with the given input image size.
    The padding module is calculated in construction function, then used in forward.
    """

    # With the same calculation as Conv2dDynamicSamePadding
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2

        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # fixed asymmetric padding (extra on bottom/right), as TensorFlow does
            self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2,
                                                pad_h // 2, pad_h - pad_h // 2))
        else:
            self.static_padding = nn.Identity()

    def forward(self, x):
        x = self.static_padding(x)
        x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
        return x
def get_same_padding_maxPool2d(image_size=None):
    """Chooses static padding if you have specified an image size, and dynamic padding otherwise.

    Static padding is necessary for ONNX exporting of models.

    Args:
        image_size (int or tuple): Size of the image.

    Returns:
        MaxPool2dDynamicSamePadding or MaxPool2dStaticSamePadding.
    """
    if image_size is None:
        return MaxPool2dDynamicSamePadding
    # bind the image size so callers can construct the class as usual
    return partial(MaxPool2dStaticSamePadding, image_size=image_size)
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
    """2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
    The padding is operated in forward function by calculating dynamically.
    """

    def __init__(self, kernel_size, stride, padding=0, dilation=1, return_indices=False, ceil_mode=False):
        super().__init__(kernel_size, stride, padding, dilation, return_indices, ceil_mode)
        # normalize possibly-scalar attributes to [h, w] pairs
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
        self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation

    def forward(self, x):
        ih, iw = x.size()[-2:]
        kh, kw = self.kernel_size
        sh, sw = self.stride
        # target output size is ceil(input / stride), as in TF 'SAME' pooling
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # asymmetric padding (extra on bottom/right), matching TensorFlow
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
                            self.dilation, self.ceil_mode, self.return_indices)
class MaxPool2dStaticSamePadding(nn.MaxPool2d):
    """2D MaxPooling like TensorFlow's 'SAME' mode, with the given input image size.
    The padding module is calculated in construction function, then used in forward.
    """

    def __init__(self, kernel_size, stride, image_size=None, **kwargs):
        super().__init__(kernel_size, stride, **kwargs)
        # normalize possibly-scalar attributes to [h, w] pairs
        self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
        self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
        self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation

        # Calculate padding based on image size and save it
        assert image_size is not None
        ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
        kh, kw = self.kernel_size
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # fixed asymmetric padding (extra on bottom/right), as TensorFlow does
            self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
        else:
            self.static_padding = nn.Identity()

    def forward(self, x):
        x = self.static_padding(x)
        x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
                         self.dilation, self.ceil_mode, self.return_indices)
        return x
################################################################################
# Helper functions for loading model params
################################################################################
# BlockDecoder: A Class for encoding and decoding BlockArgs
# efficientnet_params: A function to query compound coefficient
# get_model_params and efficientnet:
# Functions to get BlockArgs and GlobalParams for efficientnet
# url_map and url_map_advprop: Dicts of url_map for pretrained weights
# load_pretrained_weights: A function to load pretrained weights
class BlockDecoder(object):
    """Block Decoder for readability,
    straight from the official TensorFlow repository.
    """

    @staticmethod
    def _decode_block_string(block_string):
        """Get a block through a string notation of arguments.

        Args:
            block_string (str): A string notation of arguments.
                                Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.

        Returns:
            BlockArgs: The namedtuple defined at the top of this file.
        """
        assert isinstance(block_string, str)

        ops = block_string.split('_')
        options = {}
        for op in ops:
            # split each token into its letter key and numeric value
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

        # Check stride: one digit, or two identical digits
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))

        return BlockArgs(
            num_repeat=int(options['r']),
            kernel_size=int(options['k']),
            stride=[int(options['s'][0])],
            expand_ratio=int(options['e']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            se_ratio=float(options['se']) if 'se' in options else None,
            id_skip=('noskip' not in block_string))

    @staticmethod
    def _encode_block_string(block):
        """Encode a block to a string.

        Args:
            block (namedtuple): A BlockArgs type argument.

        Returns:
            block_string: A String form of BlockArgs.
        """
        # Bug fix: the field is named `stride` (not `strides`) and is stored
        # as a one-element list by _decode_block_string; accept a plain int too.
        stride = block.stride
        if isinstance(stride, int):
            stride = [stride]
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (stride[0], stride[-1]),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        # Bug fix: se_ratio may be None (no squeeze-and-excitation); comparing
        # None with 0 raised TypeError before this guard.
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """Decode a list of string notations to specify blocks inside the network.

        Args:
            string_list (list[str]): A list of strings, each string is a notation of block.

        Returns:
            blocks_args: A list of BlockArgs namedtuples of block args.
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """Encode a list of BlockArgs to a list of strings.

        Args:
            blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args.

        Returns:
            block_strings: A list of strings, each string is a notation of block.
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
def efficientnet_params(model_name):
    """Map EfficientNet model name to parameter coefficients.

    Args:
        model_name (str): Model name to be queried.

    Returns:
        tuple: The (width, depth, resolution, dropout) coefficients for
        ``model_name``. Raises KeyError for unknown names.
    """
    coefficients = {
        # Coefficients: width,depth,res,dropout
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
def efficientnet(width_coefficient=None, depth_coefficient=None, image_size=None,
                 dropout_rate=0.2, drop_connect_rate=0.2, num_classes=1000, include_top=True):
    """Create BlockArgs and GlobalParams for efficientnet model.

    Args:
        width_coefficient (float)
        depth_coefficient (float)
        image_size (int)
        dropout_rate (float)
        drop_connect_rate (float)
        num_classes (int)

        Meaning as the name suggests.

    Returns:
        blocks_args, global_params.
    """
    # Baseline (efficientnet-b0) architecture in string notation; the
    # coefficients above scale it when the EfficientNet class is constructed.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25',
        'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25',
        'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25',
        'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    global_params = GlobalParams(
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        image_size=image_size,
        dropout_rate=dropout_rate,
        num_classes=num_classes,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        drop_connect_rate=drop_connect_rate,
        depth_divisor=8,
        min_depth=None,
        include_top=include_top,
    )
    return BlockDecoder.decode(block_strings), global_params
def get_model_params(model_name, override_params):
    """Get the block args and global params for a given model name.

    Args:
        model_name (str): Model's name.
        override_params (dict): A dict to modify global_params.

    Returns:
        blocks_args, global_params
    """
    # guard clause instead of if/else: only efficientnet-* names are supported
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: {}'.format(model_name))
    w, d, s, p = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
    if override_params:
        # ValueError will be raised here if override_params has fields not included in global_params.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# train with Standard methods
# check more details in paper(EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks)
# Keys must match the model names in VALID_MODELS / efficientnet_params.
url_map = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}
# train with Adversarial Examples(AdvProp)
# check more details in paper(Adversarial Examples Improve Image Recognition)
# Note: advprop checkpoints expect a different input preprocessing.
url_map_advprop = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
    'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
# TODO: add the petrained weights url map of 'efficientnet-l2'
def load_pretrained_weights(model, model_name, weights_path=None, load_fc=True, advprop=False, verbose=True):
    """Loads pretrained weights from weights path or download using url.

    Args:
        model (Module): The whole model of efficientnet.
        model_name (str): Model name of efficientnet.
        weights_path (None or str):
            str: path to pretrained weights file on the local disk.
            None: use pretrained weights downloaded from the Internet.
        load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model.
        advprop (bool): Whether to load pretrained weights
                        trained with advprop (valid when weights_path is None).
        verbose (bool): Whether to print a confirmation once weights are loaded.
    """
    if isinstance(weights_path, str):
        # map_location='cpu' lets checkpoints saved on GPU load on CPU-only
        # machines; load_state_dict then copies onto the model's own device.
        state_dict = torch.load(weights_path, map_location='cpu')
    else:
        # AutoAugment or Advprop (different preprocessing)
        url_map_ = url_map_advprop if advprop else url_map
        state_dict = model_zoo.load_url(url_map_[model_name])

    if load_fc:
        ret = model.load_state_dict(state_dict, strict=False)
        assert not ret.missing_keys, 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    else:
        # drop the classifier weights so only the feature extractor is loaded
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        ret = model.load_state_dict(state_dict, strict=False)
        assert set(ret.missing_keys) == set(
            ['_fc.weight', '_fc.bias']), 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys)
    # Bug fix: this message previously said 'Missing keys', which misreported
    # the actual failure (unexpected keys in the checkpoint).
    assert not ret.unexpected_keys, 'Unexpected keys when loading pretrained weights: {}'.format(ret.unexpected_keys)

    if verbose:
        print('Loaded pretrained weights for {}'.format(model_name))
| 24,957 | 39.450567 | 130 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/efficientnet_pytorch/model.py | """model.py - Model and module class for EfficientNet.
They are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import torch
from torch import nn
from torch.nn import functional as F
from .utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
load_pretrained_weights,
Swish,
MemoryEfficientSwish,
calculate_output_image_size
)
# Model names accepted by the factory methods; checked in
# EfficientNet._check_model_name_is_valid before construction.
VALID_MODELS = (
    'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
    'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7',
    'efficientnet-b8',
    # Support the construction of 'efficientnet-l2' without pretrained weights
    'efficientnet-l2'
)
class MBConvBlock(nn.Module):
    """Mobile Inverted Residual Bottleneck Block.
    Args:
        block_args (namedtuple): BlockArgs, defined in utils.py.
        global_params (namedtuple): GlobalParam, defined in utils.py.
        image_size (tuple or list): [image_height, image_width].
    References:
        [1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
        [2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
        [3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
    """
    def __init__(self, block_args, global_params, image_size=None):
        super().__init__()
        self._block_args = block_args
        # PyTorch's BatchNorm momentum is (1 - TF momentum), hence the flip.
        self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow
        self._bn_eps = global_params.batch_norm_epsilon
        # Squeeze-and-excitation is only enabled for a valid ratio in (0, 1].
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip # whether to use skip connection and drop connect
        # Expansion phase (Inverted Bottleneck)
        inp = self._block_args.input_filters # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
        if self._block_args.expand_ratio != 1:
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size
        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Track the spatial size after the (possibly strided) depthwise conv so
        # later static-padding convs are built for the right resolution.
        image_size = calculate_output_image_size(image_size, s)
        # Squeeze and Excitation layer, if desired
        if self.has_se:
            Conv2d = get_same_padding_conv2d(image_size=(1, 1))
            # The squeeze width is derived from the block's *input* filters.
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Pointwise convolution phase
        final_oup = self._block_args.output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()
    def forward(self, inputs, drop_connect_rate=None):
        """MBConvBlock's forward function.
        Args:
            inputs (tensor): Input tensor.
            drop_connect_rate (float): Drop connect rate (between 0 and 1), or None to disable.
        Returns:
            Output of this block after processing.
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._expand_conv(inputs)
            x = self._bn0(x)
            x = self._swish(x)
        x = self._depthwise_conv(x)
        x = self._bn1(x)
        x = self._swish(x)
        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_reduce(x_squeezed)
            x_squeezed = self._swish(x_squeezed)
            x_squeezed = self._se_expand(x_squeezed)
            # Channel-wise gating of the depthwise output.
            x = torch.sigmoid(x_squeezed) * x
        # Pointwise Convolution
        x = self._project_conv(x)
        x = self._bn2(x)
        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        # NOTE(review): if BlockArgs ever stores stride as a one-element list,
        # `stride == 1` is False and the skip is silently disabled — confirm
        # stride is always an int here.
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            # The combination of skip connection and drop connect brings about stochastic depth.
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs # skip connection
        return x
    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).
        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
    """EfficientNet model.
    Most easily loaded with the .from_name or .from_pretrained methods.
    Args:
        blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.
        global_params (namedtuple): A set of GlobalParams shared between blocks.
    References:
        [1] https://arxiv.org/abs/1905.11946 (EfficientNet)
    Example:
        >>> import torch
        >>> from efficientnet.model import EfficientNet
        >>> inputs = torch.rand(1, 3, 224, 224)
        >>> model = EfficientNet.from_pretrained('efficientnet-b0')
        >>> model.eval()
        >>> outputs = model(inputs)
    """
    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args
        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon
        # Get stem static or dynamic convolution depending on image size
        image_size = global_params.image_size
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        # Stem: 3x3 stride-2 conv halves the spatial resolution.
        in_channels = 3 # rgb
        out_channels = round_filters(32, self._global_params) # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        image_size = calculate_output_image_size(image_size, 2)
        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )
            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
            image_size = calculate_output_image_size(image_size, block_args.stride)
            if block_args.num_repeat > 1: # modify block_args to keep same output size
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
                # image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1
        # Head
        in_channels = block_args.output_filters # output of final block
        out_channels = round_filters(1280, self._global_params)
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        # Final linear layer (only built when the classification top is wanted).
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        if self._global_params.include_top:
            self._dropout = nn.Dropout(self._global_params.dropout_rate)
            self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        # set activation to memory efficient swish by default
        self._swish = MemoryEfficientSwish()
    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export).
        Args:
            memory_efficient (bool): Whether to use memory-efficient version of swish.
        """
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
        # Propagate the choice into every MBConv block as well.
        for block in self._blocks:
            block.set_swish(memory_efficient)
    def extract_endpoints(self, inputs):
        """Use convolution layer to extract features
        from reduction levels i in [1, 2, 3, 4, 5].
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Dictionary of last intermediate features
            with reduction levels i in [1, 2, 3, 4, 5].
        Example:
            >>> import torch
            >>> from efficientnet.model import EfficientNet
            >>> inputs = torch.rand(1, 3, 224, 224)
            >>> model = EfficientNet.from_pretrained('efficientnet-b0')
            >>> endpoints = model.extract_endpoints(inputs)
            >>> print(endpoints['reduction_1'].shape)  # torch.Size([1, 16, 112, 112])
            >>> print(endpoints['reduction_2'].shape)  # torch.Size([1, 24, 56, 56])
            >>> print(endpoints['reduction_3'].shape)  # torch.Size([1, 40, 28, 28])
            >>> print(endpoints['reduction_4'].shape)  # torch.Size([1, 112, 14, 14])
            >>> print(endpoints['reduction_5'].shape)  # torch.Size([1, 320, 7, 7])
            >>> print(endpoints['reduction_6'].shape)  # torch.Size([1, 1280, 7, 7])
        """
        endpoints = dict()
        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
        prev_x = x
        # Blocks: a drop in spatial size between consecutive blocks marks the
        # boundary of a reduction level; the feature *before* the drop is kept.
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                # Linearly scale drop-connect with block depth.
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate)
            if prev_x.size(2) > x.size(2):
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
            elif idx == len(self._blocks) - 1:
                endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
            prev_x = x
        # Head
        x = self._swish(self._bn1(self._conv_head(x)))
        endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
        return endpoints
    def extract_features(self, inputs):
        """use convolution layer to extract feature .
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Output of the final convolution
            layer in the efficientnet model.
        """
        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                # Linearly scale drop-connect with block depth.
                drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
            x = block(x, drop_connect_rate=drop_connect_rate)
        # Head
        x = self._swish(self._bn1(self._conv_head(x)))
        return x
    def forward(self, inputs):
        """EfficientNet's forward function.
        Calls extract_features to extract features, applies final linear layer, and returns logits.
        Args:
            inputs (tensor): Input tensor.
        Returns:
            Output of this model after processing.
        """
        # Convolution layers
        x = self.extract_features(inputs)
        # Pooling and final linear layer
        x = self._avg_pooling(x)
        if self._global_params.include_top:
            x = x.flatten(start_dim=1)
            x = self._dropout(x)
            x = self._fc(x)
        return x
    @classmethod
    def from_name(cls, model_name, in_channels=3, **override_params):
        """Create an efficientnet model according to name.
        Args:
            model_name (str): Name for efficientnet.
            in_channels (int): Input data's channel number.
            override_params (other key word params):
                Params to override model's global_params.
                Optional key:
                    'width_coefficient', 'depth_coefficient',
                    'image_size', 'dropout_rate',
                    'num_classes', 'batch_norm_momentum',
                    'batch_norm_epsilon', 'drop_connect_rate',
                    'depth_divisor', 'min_depth'
        Returns:
            An efficientnet model.
        """
        cls._check_model_name_is_valid(model_name)
        blocks_args, global_params = get_model_params(model_name, override_params)
        model = cls(blocks_args, global_params)
        model._change_in_channels(in_channels)
        return model
    @classmethod
    def from_pretrained(cls, model_name, weights_path=None, advprop=False,
                        in_channels=3, num_classes=1000, **override_params):
        """Create an efficientnet model according to name.
        Args:
            model_name (str): Name for efficientnet.
            weights_path (None or str):
                str: path to pretrained weights file on the local disk.
                None: use pretrained weights downloaded from the Internet.
            advprop (bool):
                Whether to load pretrained weights
                trained with advprop (valid when weights_path is None).
            in_channels (int): Input data's channel number.
            num_classes (int):
                Number of categories for classification.
                It controls the output size for final linear layer.
            override_params (other key word params):
                Params to override model's global_params.
                Optional key:
                    'width_coefficient', 'depth_coefficient',
                    'image_size', 'dropout_rate',
                    'batch_norm_momentum',
                    'batch_norm_epsilon', 'drop_connect_rate',
                    'depth_divisor', 'min_depth'
        Returns:
            A pretrained efficientnet model.
        """
        model = cls.from_name(model_name, num_classes=num_classes, **override_params)
        # The pretrained fc layer only fits the original 1000-class head.
        load_pretrained_weights(model, model_name, weights_path=weights_path,
                                load_fc=(num_classes == 1000), advprop=advprop)
        model._change_in_channels(in_channels)
        return model
    @classmethod
    def get_image_size(cls, model_name):
        """Get the input image size for a given efficientnet model.
        Args:
            model_name (str): Name for efficientnet.
        Returns:
            Input image size (resolution).
        """
        cls._check_model_name_is_valid(model_name)
        _, _, res, _ = efficientnet_params(model_name)
        return res
    @classmethod
    def _check_model_name_is_valid(cls, model_name):
        """Validates model name.
        Args:
            model_name (str): Name for efficientnet.
        Raises:
            ValueError: If model_name is not in VALID_MODELS.
        """
        if model_name not in VALID_MODELS:
            raise ValueError('model_name should be one of: ' + ', '.join(VALID_MODELS))
    def _change_in_channels(self, in_channels):
        """Adjust model's first convolution layer to in_channels, if in_channels not equals 3.
        Args:
            in_channels (int): Input data's channel number.
        """
        if in_channels != 3:
            # Rebuild the stem conv with the same geometry (3x3, stride 2) but
            # a different input channel count; its weights start untrained.
            Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size)
            out_channels = round_filters(32, self._global_params)
            self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
| 17,388 | 40.402381 | 107 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/efficientnet_pytorch/__init__.py | __version__ = "0.7.1"
from .model import EfficientNet, VALID_MODELS
from .utils import (
GlobalParams,
BlockArgs,
BlockDecoder,
efficientnet,
get_model_params,
)
| 182 | 17.3 | 45 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/examples/imagenet/main.py | """
Evaluate on ImageNet. Note that at the moment, training is not implemented (I am working on it).
that being said, evaluation is working.
"""
import argparse
import os
import random
import shutil
import time
import warnings
import PIL
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from efficientnet_pytorch import EfficientNet
# CLI mirrors the official PyTorch ImageNet example; EfficientNet-specific
# flags (--image_size, --advprop) are noted below.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    help='model architecture (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
# Crop size used when arch is not an efficientnet (efficientnets derive it
# from the model name in main_worker).
parser.add_argument('--image_size', default=224, type=int,
                    help='image size')
# AdvProp checkpoints expect inputs rescaled to [-1, 1] (see main_worker).
parser.add_argument('--advprop', default=False, action='store_true',
                    help='use advprop or not')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# Best top-1 validation accuracy seen so far; updated in main_worker.
best_acc1 = 0
def main():
    """Parse CLI arguments and launch training, spawning one worker per GPU
    when multiprocessing-distributed mode is requested."""
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: builds the model, criterion, optimizer and data
    loaders, optionally resumes from a checkpoint, then runs the
    train/validate epoch loop (or a single evaluation pass)."""
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if 'efficientnet' in args.arch: # NEW
        if args.pretrained:
            model = EfficientNet.from_pretrained(args.arch, advprop=args.advprop)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = EfficientNet.from_name(args.arch)
    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    if args.advprop:
        # AdvProp checkpoints were trained on inputs in [-1, 1], not the usual
        # ImageNet mean/std normalization.
        normalize = transforms.Lambda(lambda img: img * 2.0 - 1.0)
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    if 'efficientnet' in args.arch:
        image_size = EfficientNet.get_image_size(args.arch)
    else:
        image_size = args.image_size
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_transforms = transforms.Compose([
        transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        normalize,
    ])
    print('Using image size', image_size)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_transforms),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        res = validate(val_loader, model, criterion, args)
        with open('res.txt', 'w') as f:
            print(res, file=f)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        # Only rank 0 on each node writes checkpoints in distributed mode.
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one full training epoch over `train_loader`, updating `model`
    in place via SGD and printing progress every `args.print_freq` batches."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
                             top5, prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.print(i)
def validate(val_loader, model, criterion, args):
    """Evaluate `model` on `val_loader` (no gradients) and return the
    average top-1 accuracy over the whole validation set."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.print(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; when `is_best`, also mirror it to
    the fixed alias 'model_best.pth.tar' in the working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    # Keep a stable path that always points at the best checkpoint so far.
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the latest value of a metric together with its running average.

    Attributes `val`, `sum`, `count` and `avg` are read directly by callers,
    and `name`/`fmt` drive the string rendering.
    """
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count
    def __str__(self):
        # Render as '<name> <val> (<avg>)' using the configured format spec.
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**vars(self))
class ProgressMeter(object):
    """Renders a tab-separated progress line: '<prefix>[batch/total]' followed
    by the string form of each tracked meter."""
    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
    def print(self, batch):
        """Print one progress line for the given batch index."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        for meter in self.meters:
            parts.append(str(meter))
        print('\t'.join(parts))
    def _get_batch_fmtstr(self, num_batches):
        """Build '[{:Nd}/<total>]' where N is the digit count of the total."""
        width = len(str(num_batches // 1))
        fmt = '{:%dd}' % width
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Step-decay schedule: scale the base LR by 0.1 for every 30 full epochs
    elapsed, writing the result into every parameter group."""
    num_decays = epoch // 30
    decayed_lr = args.lr * 0.1 ** num_decays
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
def accuracy(output, target, topk=(1,)):
    """Return the top-k accuracies (as percentages in 1-element tensors) of
    `output` logits against integer class labels `target`, one per k in `topk`."""
    with torch.no_grad():
        batch_size = target.size(0)
        # A single top-k pass with the largest k covers every requested value.
        maxk = max(topk)
        _, pred = output.topk(maxk, 1, True, True)
        # Rows become ranks, columns samples; compare each rank to the label.
        ranked = pred.t()
        correct = ranked.eq(target.view(1, -1).expand_as(ranked))
        scale = 100.0 / batch_size
        return [correct[:k].reshape(-1).float().sum(0, keepdim=True).mul_(scale)
                for k in topk]
# Script entry point: delegate to main() so the module stays importable.
if __name__ == '__main__':
    main()
| 17,107 | 37.531532 | 96 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tests/test_model.py | from collections import OrderedDict
import pytest
import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
# -- fixtures -------------------------------------------------------------------------------------
@pytest.fixture(scope='module', params=list(range(4)))
def model(request):
    """Yield model names 'efficientnet-b0' through 'efficientnet-b3'."""
    return 'efficientnet-b{}'.format(request.param)
@pytest.fixture(scope='module', params=[True, False])
def pretrained(request):
    """Run dependent tests once with and once without pretrained weights."""
    return request.param
@pytest.fixture(scope='function')
def net(model, pretrained):
    """Build a fresh network under test, optionally loading pretrained weights."""
    if pretrained:
        return EfficientNet.from_pretrained(model)
    return EfficientNet.from_name(model)
# -- tests ----------------------------------------------------------------------------------------
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_forward(net, img_size):
    """A forward pass at several resolutions must yield non-NaN logits."""
    batch = torch.zeros((1, 3, img_size, img_size))
    logits = net(batch)
    assert not torch.isnan(logits).any()
def test_dropout_training(net):
"""Test dropout `.training` is set by `.train()` on parent `nn.module`"""
net.train()
assert net._dropout.training == True
def test_dropout_eval(net):
"""Test dropout `.training` is set by `.eval()` on parent `nn.module`"""
net.eval()
assert net._dropout.training == False
def test_dropout_update(net):
"""Test dropout `.training` is updated by `.train()` and `.eval()` on parent `nn.module`"""
net.train()
assert net._dropout.training == True
net.eval()
assert net._dropout.training == False
net.train()
assert net._dropout.training == True
net.eval()
assert net._dropout.training == False
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_modify_dropout(net, img_size):
    """Test ability to modify dropout and fc modules of network"""
    # Swap the single dropout for a BN/Linear/ReLU mini-head projecting to 512
    # features, and resize the final classifier to match.
    dropout = nn.Sequential(OrderedDict([
        ('_bn2', nn.BatchNorm1d(net._bn1.num_features)),
        ('_drop1', nn.Dropout(p=net._global_params.dropout_rate)),
        ('_linear1', nn.Linear(net._bn1.num_features, 512)),
        ('_relu', nn.ReLU()),
        ('_bn3', nn.BatchNorm1d(512)),
        ('_drop2', nn.Dropout(p=net._global_params.dropout_rate / 2))
    ]))
    fc = nn.Linear(512, net._global_params.num_classes)
    net._dropout = dropout
    net._fc = fc
    # Two-sample batch exercises the newly inserted BatchNorm1d layers.
    data = torch.zeros((2, 3, img_size, img_size))
    output = net(data)
    assert not torch.isnan(output).any()
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_modify_pool(net, img_size):
    """Test ability to modify pooling module of network"""
    class AdaptiveMaxAvgPool(nn.Module):
        # Concatenates adaptive average- and max-pooled features along the
        # channel axis, doubling the feature width fed to the classifier.
        def __init__(self):
            super().__init__()
            self.ada_avgpool = nn.AdaptiveAvgPool2d(1)
            self.ada_maxpool = nn.AdaptiveMaxPool2d(1)
        def forward(self, x):
            avg_x = self.ada_avgpool(x)
            max_x = self.ada_maxpool(x)
            x = torch.cat((avg_x, max_x), dim=1)
            return x
    avg_pooling = AdaptiveMaxAvgPool()
    # Classifier input doubles because of the channel-wise concat above.
    fc = nn.Linear(net._fc.in_features * 2, net._global_params.num_classes)
    net._avg_pooling = avg_pooling
    net._fc = fc
    data = torch.zeros((2, 3, img_size, img_size))
    output = net(data)
    assert not torch.isnan(output).any()
@pytest.mark.parametrize('img_size', [224, 256, 512])
def test_extract_endpoints(net, img_size):
    """Test `.extract_endpoints()` doesn't throw an error"""
    data = torch.zeros((1, 3, img_size, img_size))
    endpoints = net.extract_endpoints(data)
    assert not torch.isnan(endpoints['reduction_1']).any()
    assert not torch.isnan(endpoints['reduction_2']).any()
    assert not torch.isnan(endpoints['reduction_3']).any()
    assert not torch.isnan(endpoints['reduction_4']).any()
    assert not torch.isnan(endpoints['reduction_5']).any()
    # Each reduction level halves the spatial size of the previous one.
    assert endpoints['reduction_1'].size(2) == img_size // 2
    assert endpoints['reduction_2'].size(2) == img_size // 4
    assert endpoints['reduction_3'].size(2) == img_size // 8
    assert endpoints['reduction_4'].size(2) == img_size // 16
    assert endpoints['reduction_5'].size(2) == img_size // 32
| 4,122 | 31.984 | 99 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/load_tf_weights_tf1.py | import numpy as np
import tensorflow as tf
import torch
def load_param(checkpoint_file, conversion_table, model_name):
    """Copy TensorFlow checkpoint variables into the mapped PyTorch tensors.

    Args:
        checkpoint_file (string): pretrained checkpoint model file in tensorflow
        conversion_table (dict): { pytorch tensor in a model : checkpoint variable name }
        model_name (string): scope prefix prepended to every checkpoint variable name
    """
    for pyt_param, tf_param_name in conversion_table.items():
        full_name = '{}/{}'.format(str(model_name), tf_param_name)
        tf_param = tf.train.load_variable(checkpoint_file, full_name)
        if 'conv' in full_name and 'kernel' in full_name:
            # Conv kernels: TF stores HWIO, PyTorch expects OIHW.
            tf_param = np.transpose(tf_param, (3, 2, 0, 1))
            if 'depthwise' in full_name:
                # Depthwise kernels additionally need the first two axes swapped.
                tf_param = np.transpose(tf_param, (1, 0, 2, 3))
        elif full_name.endswith('kernel'):  # for weight(kernel), we should do transpose
            tf_param = np.transpose(tf_param)

        assert pyt_param.size() == tf_param.shape, \
            'Dim Mismatch: %s vs %s ; %s' % (tuple(pyt_param.size()), tf_param.shape, full_name)
        pyt_param.data = torch.from_numpy(tf_param)
def load_efficientnet(model, checkpoint_file, model_name):
    """
    Load PyTorch EfficientNet from TensorFlow checkpoint file

    Builds a mapping { pytorch parameter tensor : tf checkpoint variable name }
    covering the stem, head and every MBConv block, then copies the values
    across with load_param.

    Returns:
        The full conversion table (dict) that was used.
    """
    # This will store the entire conversion table
    conversion_table = {}
    merge = lambda dict1, dict2: {**dict1, **dict2}

    # All the weights not in the conv blocks
    conversion_table_for_weights_outside_blocks = {
        model._conv_stem.weight: 'stem/conv2d/kernel',  # [3, 3, 3, 32]),
        model._bn0.bias: 'stem/tpu_batch_normalization/beta',  # [32]),
        model._bn0.weight: 'stem/tpu_batch_normalization/gamma',  # [32]),
        model._bn0.running_mean: 'stem/tpu_batch_normalization/moving_mean',  # [32]),
        model._bn0.running_var: 'stem/tpu_batch_normalization/moving_variance',  # [32]),
        model._conv_head.weight: 'head/conv2d/kernel',  # [1, 1, 320, 1280]),
        model._bn1.bias: 'head/tpu_batch_normalization/beta',  # [1280]),
        model._bn1.weight: 'head/tpu_batch_normalization/gamma',  # [1280]),
        model._bn1.running_mean: 'head/tpu_batch_normalization/moving_mean',  # [32]),
        model._bn1.running_var: 'head/tpu_batch_normalization/moving_variance',  # [32]),
        model._fc.bias: 'head/dense/bias',  # [1000]),
        model._fc.weight: 'head/dense/kernel',  # [1280, 1000]),
    }
    conversion_table = merge(conversion_table, conversion_table_for_weights_outside_blocks)

    # The first conv block is special because it does not have _expand_conv
    conversion_table_for_first_block = {
        model._blocks[0]._project_conv.weight: 'blocks_0/conv2d/kernel',  # 1, 1, 32, 16]),
        model._blocks[0]._depthwise_conv.weight: 'blocks_0/depthwise_conv2d/depthwise_kernel',  # [3, 3, 32, 1]),
        model._blocks[0]._se_reduce.bias: 'blocks_0/se/conv2d/bias',  # , [8]),
        model._blocks[0]._se_reduce.weight: 'blocks_0/se/conv2d/kernel',  # , [1, 1, 32, 8]),
        model._blocks[0]._se_expand.bias: 'blocks_0/se/conv2d_1/bias',  # , [32]),
        model._blocks[0]._se_expand.weight: 'blocks_0/se/conv2d_1/kernel',  # , [1, 1, 8, 32]),
        model._blocks[0]._bn1.bias: 'blocks_0/tpu_batch_normalization/beta',  # [32]),
        model._blocks[0]._bn1.weight: 'blocks_0/tpu_batch_normalization/gamma',  # [32]),
        model._blocks[0]._bn1.running_mean: 'blocks_0/tpu_batch_normalization/moving_mean',
        model._blocks[0]._bn1.running_var: 'blocks_0/tpu_batch_normalization/moving_variance',
        model._blocks[0]._bn2.bias: 'blocks_0/tpu_batch_normalization_1/beta',  # [16]),
        model._blocks[0]._bn2.weight: 'blocks_0/tpu_batch_normalization_1/gamma',  # [16]),
        model._blocks[0]._bn2.running_mean: 'blocks_0/tpu_batch_normalization_1/moving_mean',
        model._blocks[0]._bn2.running_var: 'blocks_0/tpu_batch_normalization_1/moving_variance',
    }
    conversion_table = merge(conversion_table, conversion_table_for_first_block)

    # Conv blocks
    for i in range(len(model._blocks)):
        # A block without an _expand_conv parameter uses the same layout as
        # block 0: its TF conv variable numbering starts at conv2d instead of
        # conv2d_1 and it has only two batch-norm layers.
        is_first_block = '_expand_conv.weight' not in [n for n, p in model._blocks[i].named_parameters()]
        if is_first_block:
            conversion_table_block = {
                model._blocks[i]._project_conv.weight: 'blocks_' + str(i) + '/conv2d/kernel',  # 1, 1, 32, 16]),
                model._blocks[i]._depthwise_conv.weight: 'blocks_' + str(i) + '/depthwise_conv2d/depthwise_kernel',
                # [3, 3, 32, 1]),
                model._blocks[i]._se_reduce.bias: 'blocks_' + str(i) + '/se/conv2d/bias',  # , [8]),
                model._blocks[i]._se_reduce.weight: 'blocks_' + str(i) + '/se/conv2d/kernel',  # , [1, 1, 32, 8]),
                model._blocks[i]._se_expand.bias: 'blocks_' + str(i) + '/se/conv2d_1/bias',  # , [32]),
                model._blocks[i]._se_expand.weight: 'blocks_' + str(i) + '/se/conv2d_1/kernel',  # , [1, 1, 8, 32]),
                model._blocks[i]._bn1.bias: 'blocks_' + str(i) + '/tpu_batch_normalization/beta',  # [32]),
                model._blocks[i]._bn1.weight: 'blocks_' + str(i) + '/tpu_batch_normalization/gamma',  # [32]),
                model._blocks[i]._bn1.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_mean',
                model._blocks[i]._bn1.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_variance',
                model._blocks[i]._bn2.bias: 'blocks_' + str(i) + '/tpu_batch_normalization_1/beta',  # [16]),
                model._blocks[i]._bn2.weight: 'blocks_' + str(i) + '/tpu_batch_normalization_1/gamma',  # [16]),
                model._blocks[i]._bn2.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_mean',
                model._blocks[i]._bn2.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_variance',
            }
        else:
            conversion_table_block = {
                model._blocks[i]._expand_conv.weight: 'blocks_' + str(i) + '/conv2d/kernel',
                model._blocks[i]._project_conv.weight: 'blocks_' + str(i) + '/conv2d_1/kernel',
                model._blocks[i]._depthwise_conv.weight: 'blocks_' + str(i) + '/depthwise_conv2d/depthwise_kernel',
                model._blocks[i]._se_reduce.bias: 'blocks_' + str(i) + '/se/conv2d/bias',
                model._blocks[i]._se_reduce.weight: 'blocks_' + str(i) + '/se/conv2d/kernel',
                model._blocks[i]._se_expand.bias: 'blocks_' + str(i) + '/se/conv2d_1/bias',
                model._blocks[i]._se_expand.weight: 'blocks_' + str(i) + '/se/conv2d_1/kernel',
                model._blocks[i]._bn0.bias: 'blocks_' + str(i) + '/tpu_batch_normalization/beta',
                model._blocks[i]._bn0.weight: 'blocks_' + str(i) + '/tpu_batch_normalization/gamma',
                model._blocks[i]._bn0.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_mean',
                model._blocks[i]._bn0.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_variance',
                model._blocks[i]._bn1.bias: 'blocks_' + str(i) + '/tpu_batch_normalization_1/beta',
                model._blocks[i]._bn1.weight: 'blocks_' + str(i) + '/tpu_batch_normalization_1/gamma',
                model._blocks[i]._bn1.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_mean',
                model._blocks[i]._bn1.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_variance',
                model._blocks[i]._bn2.bias: 'blocks_' + str(i) + '/tpu_batch_normalization_2/beta',
                model._blocks[i]._bn2.weight: 'blocks_' + str(i) + '/tpu_batch_normalization_2/gamma',
                model._blocks[i]._bn2.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization_2/moving_mean',
                model._blocks[i]._bn2.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization_2/moving_variance',
            }
        conversion_table = merge(conversion_table, conversion_table_block)

    # Load TensorFlow parameters into PyTorch model
    load_param(checkpoint_file, conversion_table, model_name)
    return conversion_table
def load_and_save_temporary_tensorflow_model(model_name, model_ckpt, example_img='../../example/img.jpg'):
    """ Loads and saves a TensorFlow model. """
    driver = eval_ckpt_main.EvalCkptDriver(model_name)
    files = [example_img]
    with tf.Graph().as_default(), tf.Session() as sess:
        # Building the dataset and model populates the graph with the
        # variables the checkpoint will be restored into.
        images, labels = driver.build_dataset(files, [0] * len(files), False)
        probs = driver.build_model(images, is_training=False)
        sess.run(tf.global_variables_initializer())
        print(model_ckpt)
        driver.restore_model(sess, model_ckpt)
        tf.train.Saver().save(sess, 'tmp/model.ckpt')
if __name__ == '__main__':
    import sys
    import argparse

    # eval_ckpt_main lives in ./original_tf; importing it here makes it
    # available as a module-level name to the functions defined above.
    sys.path.append('original_tf')
    import eval_ckpt_main
    from efficientnet_pytorch import EfficientNet

    parser = argparse.ArgumentParser(
        description='Convert TF model to PyTorch model and save for easier future loading')
    parser.add_argument('--model_name', type=str, default='efficientnet-b0',
                        help='efficientnet-b{N}, where N is an integer 0 <= N <= 8')
    parser.add_argument('--tf_checkpoint', type=str, default='pretrained_tensorflow/efficientnet-b0/',
                        help='checkpoint file path')
    parser.add_argument('--output_file', type=str, default='pretrained_pytorch/efficientnet-b0.pth',
                        help='output PyTorch model file name')
    args = parser.parse_args()

    # Build model
    model = EfficientNet.from_name(args.model_name)

    # Load and save temporary TensorFlow file due to TF nuances
    print(args.tf_checkpoint)
    load_and_save_temporary_tensorflow_model(args.model_name, args.tf_checkpoint)

    # Load weights from the re-saved checkpoint under tmp/
    load_efficientnet(model, 'tmp/model.ckpt', model_name=args.model_name)
    print('Loaded TF checkpoint weights')

    # Save PyTorch file
    torch.save(model.state_dict(), args.output_file)
    print('Saved model to', args.output_file)
| 10,344 | 58.797688 | 126 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/load_tf_weights.py | import numpy as np
import tensorflow as tf
import torch
# This conversion script builds TF1-style graphs and sessions; turn off
# eager execution / TF2 behavior globally.
tf.compat.v1.disable_v2_behavior()
def load_param(checkpoint_file, conversion_table, model_name):
    """Copy TensorFlow checkpoint variables into the mapped PyTorch tensors.

    Args:
        checkpoint_file (string): pretrained checkpoint model file in tensorflow
        conversion_table (dict): { pytorch tensor in a model : checkpoint variable name }
        model_name (string): scope prefix prepended to every checkpoint variable name
    """
    for pyt_param, tf_param_name in conversion_table.items():
        full_name = f'{model_name}/{tf_param_name}'
        tf_param = tf.train.load_variable(checkpoint_file, full_name)
        if 'conv' in full_name and 'kernel' in full_name:
            # Conv kernels: TF stores HWIO, PyTorch expects OIHW.
            tf_param = np.transpose(tf_param, (3, 2, 0, 1))
            if 'depthwise' in full_name:
                # Depthwise kernels additionally need the first two axes swapped.
                tf_param = np.transpose(tf_param, (1, 0, 2, 3))
        elif full_name.endswith('kernel'):  # for weight(kernel), we should do transpose
            tf_param = np.transpose(tf_param)

        assert pyt_param.size() == tf_param.shape, \
            'Dim Mismatch: %s vs %s ; %s' % (tuple(pyt_param.size()), tf_param.shape, full_name)
        pyt_param.data = torch.from_numpy(tf_param)
def load_efficientnet(model, checkpoint_file, model_name):
    """
    Load PyTorch EfficientNet from TensorFlow checkpoint file

    Builds a mapping { pytorch parameter tensor : tf checkpoint variable name }
    covering the stem, head and every MBConv block, then copies the values
    across with load_param.

    Returns:
        The full conversion table (dict) that was used.
    """
    # This will store the entire conversion table
    conversion_table = {}
    merge = lambda dict1, dict2: {**dict1, **dict2}

    # All the weights not in the conv blocks
    conversion_table_for_weights_outside_blocks = {
        model._conv_stem.weight: 'stem/conv2d/kernel',  # [3, 3, 3, 32]),
        model._bn0.bias: 'stem/tpu_batch_normalization/beta',  # [32]),
        model._bn0.weight: 'stem/tpu_batch_normalization/gamma',  # [32]),
        model._bn0.running_mean: 'stem/tpu_batch_normalization/moving_mean',  # [32]),
        model._bn0.running_var: 'stem/tpu_batch_normalization/moving_variance',  # [32]),
        model._conv_head.weight: 'head/conv2d/kernel',  # [1, 1, 320, 1280]),
        model._bn1.bias: 'head/tpu_batch_normalization/beta',  # [1280]),
        model._bn1.weight: 'head/tpu_batch_normalization/gamma',  # [1280]),
        model._bn1.running_mean: 'head/tpu_batch_normalization/moving_mean',  # [32]),
        model._bn1.running_var: 'head/tpu_batch_normalization/moving_variance',  # [32]),
        model._fc.bias: 'head/dense/bias',  # [1000]),
        model._fc.weight: 'head/dense/kernel',  # [1280, 1000]),
    }
    conversion_table = merge(conversion_table, conversion_table_for_weights_outside_blocks)

    # The first conv block is special because it does not have _expand_conv
    conversion_table_for_first_block = {
        model._blocks[0]._project_conv.weight: 'blocks_0/conv2d/kernel',  # 1, 1, 32, 16]),
        model._blocks[0]._depthwise_conv.weight: 'blocks_0/depthwise_conv2d/depthwise_kernel',  # [3, 3, 32, 1]),
        model._blocks[0]._se_reduce.bias: 'blocks_0/se/conv2d/bias',  # , [8]),
        model._blocks[0]._se_reduce.weight: 'blocks_0/se/conv2d/kernel',  # , [1, 1, 32, 8]),
        model._blocks[0]._se_expand.bias: 'blocks_0/se/conv2d_1/bias',  # , [32]),
        model._blocks[0]._se_expand.weight: 'blocks_0/se/conv2d_1/kernel',  # , [1, 1, 8, 32]),
        model._blocks[0]._bn1.bias: 'blocks_0/tpu_batch_normalization/beta',  # [32]),
        model._blocks[0]._bn1.weight: 'blocks_0/tpu_batch_normalization/gamma',  # [32]),
        model._blocks[0]._bn1.running_mean: 'blocks_0/tpu_batch_normalization/moving_mean',
        model._blocks[0]._bn1.running_var: 'blocks_0/tpu_batch_normalization/moving_variance',
        model._blocks[0]._bn2.bias: 'blocks_0/tpu_batch_normalization_1/beta',  # [16]),
        model._blocks[0]._bn2.weight: 'blocks_0/tpu_batch_normalization_1/gamma',  # [16]),
        model._blocks[0]._bn2.running_mean: 'blocks_0/tpu_batch_normalization_1/moving_mean',
        model._blocks[0]._bn2.running_var: 'blocks_0/tpu_batch_normalization_1/moving_variance',
    }
    conversion_table = merge(conversion_table, conversion_table_for_first_block)

    # Conv blocks
    for i in range(len(model._blocks)):
        # A block without an _expand_conv parameter uses the same layout as
        # block 0: its TF conv variable numbering starts at conv2d instead of
        # conv2d_1 and it has only two batch-norm layers.
        is_first_block = '_expand_conv.weight' not in [n for n, p in model._blocks[i].named_parameters()]
        if is_first_block:
            conversion_table_block = {
                model._blocks[i]._project_conv.weight: 'blocks_' + str(i) + '/conv2d/kernel',  # 1, 1, 32, 16]),
                model._blocks[i]._depthwise_conv.weight: 'blocks_' + str(i) + '/depthwise_conv2d/depthwise_kernel',
                # [3, 3, 32, 1]),
                model._blocks[i]._se_reduce.bias: 'blocks_' + str(i) + '/se/conv2d/bias',  # , [8]),
                model._blocks[i]._se_reduce.weight: 'blocks_' + str(i) + '/se/conv2d/kernel',  # , [1, 1, 32, 8]),
                model._blocks[i]._se_expand.bias: 'blocks_' + str(i) + '/se/conv2d_1/bias',  # , [32]),
                model._blocks[i]._se_expand.weight: 'blocks_' + str(i) + '/se/conv2d_1/kernel',  # , [1, 1, 8, 32]),
                model._blocks[i]._bn1.bias: 'blocks_' + str(i) + '/tpu_batch_normalization/beta',  # [32]),
                model._blocks[i]._bn1.weight: 'blocks_' + str(i) + '/tpu_batch_normalization/gamma',  # [32]),
                model._blocks[i]._bn1.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_mean',
                model._blocks[i]._bn1.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_variance',
                model._blocks[i]._bn2.bias: 'blocks_' + str(i) + '/tpu_batch_normalization_1/beta',  # [16]),
                model._blocks[i]._bn2.weight: 'blocks_' + str(i) + '/tpu_batch_normalization_1/gamma',  # [16]),
                model._blocks[i]._bn2.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_mean',
                model._blocks[i]._bn2.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_variance',
            }
        else:
            conversion_table_block = {
                model._blocks[i]._expand_conv.weight: 'blocks_' + str(i) + '/conv2d/kernel',
                model._blocks[i]._project_conv.weight: 'blocks_' + str(i) + '/conv2d_1/kernel',
                model._blocks[i]._depthwise_conv.weight: 'blocks_' + str(i) + '/depthwise_conv2d/depthwise_kernel',
                model._blocks[i]._se_reduce.bias: 'blocks_' + str(i) + '/se/conv2d/bias',
                model._blocks[i]._se_reduce.weight: 'blocks_' + str(i) + '/se/conv2d/kernel',
                model._blocks[i]._se_expand.bias: 'blocks_' + str(i) + '/se/conv2d_1/bias',
                model._blocks[i]._se_expand.weight: 'blocks_' + str(i) + '/se/conv2d_1/kernel',
                model._blocks[i]._bn0.bias: 'blocks_' + str(i) + '/tpu_batch_normalization/beta',
                model._blocks[i]._bn0.weight: 'blocks_' + str(i) + '/tpu_batch_normalization/gamma',
                model._blocks[i]._bn0.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_mean',
                model._blocks[i]._bn0.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization/moving_variance',
                model._blocks[i]._bn1.bias: 'blocks_' + str(i) + '/tpu_batch_normalization_1/beta',
                model._blocks[i]._bn1.weight: 'blocks_' + str(i) + '/tpu_batch_normalization_1/gamma',
                model._blocks[i]._bn1.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_mean',
                model._blocks[i]._bn1.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization_1/moving_variance',
                model._blocks[i]._bn2.bias: 'blocks_' + str(i) + '/tpu_batch_normalization_2/beta',
                model._blocks[i]._bn2.weight: 'blocks_' + str(i) + '/tpu_batch_normalization_2/gamma',
                model._blocks[i]._bn2.running_mean: 'blocks_' + str(i) + '/tpu_batch_normalization_2/moving_mean',
                model._blocks[i]._bn2.running_var: 'blocks_' + str(i) + '/tpu_batch_normalization_2/moving_variance',
            }
        conversion_table = merge(conversion_table, conversion_table_block)

    # Load TensorFlow parameters into PyTorch model
    load_param(checkpoint_file, conversion_table, model_name)
    return conversion_table
def load_and_save_temporary_tensorflow_model(model_name, model_ckpt, example_img='../../example/img.jpg'):
    """ Loads and saves a TensorFlow model. """
    driver = eval_ckpt_main.EvalCkptDriver(model_name)
    files = [example_img]
    with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
        # Building the dataset and model populates the graph with the
        # variables the checkpoint will be restored into.
        images, labels = driver.build_dataset(files, [0] * len(files), False)
        probs = driver.build_model(images, is_training=False)
        sess.run(tf.compat.v1.global_variables_initializer())
        print(model_ckpt)
        driver.restore_model(sess, model_ckpt)
        tf.compat.v1.train.Saver().save(sess, 'tmp/model.ckpt')
if __name__ == '__main__':
    import sys
    import argparse

    # eval_ckpt_main lives in ./original_tf; importing it here makes it
    # available as a module-level name to the functions defined above.
    sys.path.append('original_tf')
    import eval_ckpt_main
    from efficientnet_pytorch import EfficientNet

    parser = argparse.ArgumentParser(
        description='Convert TF model to PyTorch model and save for easier future loading')
    parser.add_argument('--model_name', type=str, default='efficientnet-b0',
                        help='efficientnet-b{N}, where N is an integer 0 <= N <= 8')
    parser.add_argument('--tf_checkpoint', type=str, default='pretrained_tensorflow/efficientnet-b0/',
                        help='checkpoint file path')
    parser.add_argument('--output_file', type=str, default='pretrained_pytorch/efficientnet-b0.pth',
                        help='output PyTorch model file name')
    args = parser.parse_args()

    # Build model
    model = EfficientNet.from_name(args.model_name)

    # Load and save temporary TensorFlow file due to TF nuances
    print(args.tf_checkpoint)
    load_and_save_temporary_tensorflow_model(args.model_name, args.tf_checkpoint)

    # Load weights from the re-saved checkpoint under tmp/
    load_efficientnet(model, 'tmp/model.ckpt', model_name=args.model_name)
    print('Loaded TF checkpoint weights')

    # Save PyTorch file
    torch.save(model.state_dict(), args.output_file)
    print('Saved model to', args.output_file)
| 10,410 | 58.491429 | 126 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/eval_ckpt_main.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eval checkpoint driver.
This is an example evaluation script for users to understand the EfficientNet
model checkpoints on CPU. To serve EfficientNet, please consider to export a
`SavedModel` from checkpoints and use tf-serving to serve.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
import efficientnet_builder
import preprocessing
# This driver builds TF1-style graphs and sessions; disable eager / TF2 behavior.
tf.compat.v1.disable_v2_behavior()

# Command-line flags (parsed by absl.app in the __main__ guard below).
flags.DEFINE_string('model_name', 'efficientnet-b0', 'Model name to eval.')
flags.DEFINE_string('runmode', 'examples', 'Running mode: examples or imagenet')
flags.DEFINE_string('imagenet_eval_glob', None,
                    'Imagenet eval image glob, '
                    'such as /imagenet/ILSVRC2012*.JPEG')
flags.DEFINE_string('imagenet_eval_label', None,
                    'Imagenet eval label file path, '
                    'such as /imagenet/ILSVRC2012_validation_ground_truth.txt')
flags.DEFINE_string('ckpt_dir', '/tmp/ckpt/', 'Checkpoint folders')
flags.DEFINE_string('example_img', '/tmp/panda.jpg',
                    'Filepath for a single example image.')
flags.DEFINE_string('labels_map_file', '/tmp/labels_map.txt',
                    'Labels map from label id to its meaning.')
flags.DEFINE_integer('num_images', 5000,
                     'Number of images to eval. Use -1 to eval all images.')
FLAGS = flags.FLAGS

# ImageNet per-channel RGB statistics, scaled to the 0-255 pixel range;
# used by EvalCkptDriver.build_model to normalize inputs.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
class EvalCkptDriver(object):
    """A driver for running eval inference.

    Attributes:
        model_name: str. Model name to eval.
        batch_size: int. Eval batch size.
        num_classes: int. Number of classes, default to 1000 for ImageNet.
        image_size: int. Input image size, determined by model name.
    """

    def __init__(self, model_name='efficientnet-b0', batch_size=1):
        """Initialize internal variables."""
        self.model_name = model_name
        self.batch_size = batch_size
        self.num_classes = 1000
        # Model Scaling parameters
        _, _, self.image_size, _ = efficientnet_builder.efficientnet_params(
            model_name)

    def restore_model(self, sess, ckpt_dir):
        """Restore variables from checkpoint dir."""
        checkpoint = tf.train.latest_checkpoint(ckpt_dir)
        ema = tf.train.ExponentialMovingAverage(decay=0.9999)
        ema_vars = tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection('moving_vars')
        for v in tf.compat.v1.global_variables():
            # Batch-norm moving statistics are not trainable but must be
            # restored from their EMA shadow values as well.
            if 'moving_mean' in v.name or 'moving_variance' in v.name:
                ema_vars.append(v)
        ema_vars = list(set(ema_vars))
        var_dict = ema.variables_to_restore(ema_vars)
        saver = tf.compat.v1.train.Saver(var_dict, max_to_keep=1)
        saver.restore(sess, checkpoint)

    def build_model(self, features, is_training):
        """Build model with input features."""
        # Normalize with ImageNet statistics (inputs arrive in 0-255 range).
        features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
        features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
        logits, _ = efficientnet_builder.build_model(
            features, self.model_name, is_training)
        probs = tf.nn.softmax(logits)
        probs = tf.squeeze(probs)
        return probs

    def build_dataset(self, filenames, labels, is_training):
        """Build input dataset."""
        filenames = tf.constant(filenames)
        labels = tf.constant(labels)
        dataset = tf.compat.v1.data.Dataset.from_tensor_slices((filenames, labels))

        def _parse_function(filename, label):
            image_string = tf.io.read_file(filename)
            image_decoded = preprocessing.preprocess_image(
                image_string, is_training, self.image_size)
            image = tf.cast(image_decoded, tf.float32)
            return image, label

        dataset = dataset.map(_parse_function)
        dataset = dataset.batch(self.batch_size)
        iterator = dataset.make_one_shot_iterator()
        # iterator = iter(dataset)
        images, labels = iterator.get_next()
        return images, labels

    def run_inference(self, ckpt_dir, image_files, labels):
        """Build and run inference on the target images and labels."""
        # NOTE: tf.Session / tf.global_variables_initializer do not exist in
        # the TF2 namespace even with v2 behavior disabled; use the
        # tf.compat.v1 aliases, consistent with the rest of this class.
        with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
            images, labels = self.build_dataset(image_files, labels, False)
            probs = self.build_model(images, is_training=False)

            sess.run(tf.compat.v1.global_variables_initializer())
            self.restore_model(sess, ckpt_dir)

            prediction_idx = []
            prediction_prob = []
            for _ in range(len(image_files) // self.batch_size):
                out_probs = sess.run(probs)
                idx = np.argsort(out_probs)[::-1]
                prediction_idx.append(idx[:5])
                prediction_prob.append([out_probs[pid] for pid in idx[:5]])

        # Return the top 5 predictions (idx and prob) for each image.
        return prediction_idx, prediction_prob
def eval_example_images(model_name, ckpt_dir, image_files, labels_map_file):
    """Eval a list of example images.

    Args:
        model_name: str. The name of model to eval.
        ckpt_dir: str. Checkpoint directory path.
        image_files: List[str]. A list of image file paths.
        labels_map_file: str. The labels map file path.

    Returns:
        A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
        index and pred_prob is the top 5 prediction probability.
    """
    eval_ckpt_driver = EvalCkptDriver(model_name)
    # tf.gfile was removed from the TF2 namespace; tf.io.gfile.GFile is the
    # spelling available in both TF 1.15 and TF 2.x.
    classes = json.loads(tf.io.gfile.GFile(labels_map_file).read())
    pred_idx, pred_prob = eval_ckpt_driver.run_inference(
        ckpt_dir, image_files, [0] * len(image_files))
    for i in range(len(image_files)):
        print('predicted class for image {}: '.format(image_files[i]))
        for j, idx in enumerate(pred_idx[i]):
            print('  -> top_{} ({:4.2f}%): {}  '.format(
                j, pred_prob[i][j] * 100, classes[str(idx)]))
    return pred_idx, pred_prob
def eval_imagenet(model_name,
                  ckpt_dir,
                  imagenet_eval_glob,
                  imagenet_eval_label,
                  num_images):
    """Eval ImageNet images and report top1/top5 accuracy.

    Args:
        model_name: str. The name of model to eval.
        ckpt_dir: str. Checkpoint directory path.
        imagenet_eval_glob: str. File path glob for all eval images.
        imagenet_eval_label: str. File path for eval label.
        num_images: int. Number of images to eval: -1 means eval the whole dataset.

    Returns:
        A tuple (top1, top5) for top1 and top5 accuracy.
    """
    eval_ckpt_driver = EvalCkptDriver(model_name)
    # tf.gfile was removed from the TF2 namespace; tf.io.gfile is the spelling
    # available in both TF 1.15 and TF 2.x.
    imagenet_val_labels = [int(i) for i in tf.io.gfile.GFile(imagenet_eval_label)]
    imagenet_filenames = sorted(tf.io.gfile.glob(imagenet_eval_glob))
    if num_images < 0:
        num_images = len(imagenet_filenames)
    image_files = imagenet_filenames[:num_images]
    labels = imagenet_val_labels[:num_images]

    pred_idx, _ = eval_ckpt_driver.run_inference(ckpt_dir, image_files, labels)
    top1_cnt, top5_cnt = 0.0, 0.0
    for i, label in enumerate(labels):
        top1_cnt += label in pred_idx[i][:1]
        top5_cnt += label in pred_idx[i][:5]
        if i % 100 == 0:
            print('Step {}: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(
                i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
            sys.stdout.flush()
    top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
    print('Final: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(top1, top5))
    return top1, top5
def main(unused_argv):
    """Dispatch on FLAGS.runmode: single-image examples or ImageNet eval."""
    # tf.logging was removed from the TF2 namespace; the v1 logging module
    # lives under tf.compat.v1, consistent with the rest of this file.
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    if FLAGS.runmode == 'examples':
        # Run inference for an example image.
        eval_example_images(FLAGS.model_name, FLAGS.ckpt_dir, [FLAGS.example_img],
                            FLAGS.labels_map_file)
    elif FLAGS.runmode == 'imagenet':
        # Run inference for imagenet.
        eval_imagenet(FLAGS.model_name, FLAGS.ckpt_dir, FLAGS.imagenet_eval_glob,
                      FLAGS.imagenet_eval_label, FLAGS.num_images)
    else:
        print('must specify runmode: examples or imagenet')
if __name__ == '__main__':
    # absl.app parses the flags defined above before invoking main.
    app.run(main)
| 8,644 | 37.252212 | 94 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/efficientnet_builder.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Builder for EfficientNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import re
from absl import logging
import numpy as np
import six
import tensorflow.compat.v1 as tf
import efficientnet_model
import utils
# ImageNet per-channel RGB statistics, scaled to the 0-255 pixel range.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def efficientnet_params(model_name):
    """Get efficientnet params based on model name.

    Returns:
        A (width_coefficient, depth_coefficient, resolution, dropout_rate)
        tuple for the requested variant.

    Raises:
        KeyError: if model_name is not one of the known variants.
    """
    # Compound-scaling table, indexed by variant name:
    # (width_coefficient, depth_coefficient, resolution, dropout_rate)
    scaling_table = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return scaling_table[model_name]
class BlockDecoder(object):
    """Block Decoder for readability.

    Translates between BlockArgs namedtuples and the compact string notation
    used in _DEFAULT_BLOCKS_ARGS (e.g. 'r1_k3_s11_e1_i32_o16_se0.25').
    """

    def _decode_block_string(self, block_string):
        """Gets a block through a string notation of arguments."""
        if six.PY2:
            assert isinstance(block_string, (str, unicode))
        else:
            assert isinstance(block_string, str)
        ops = block_string.split('_')
        options = {}
        for op in ops:
            # Split each option into its letter key and numeric value,
            # e.g. 'k3' -> ('k', '3'), 'se0.25' -> ('se', '0.25').
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
        if 's' not in options or len(options['s']) != 2:
            raise ValueError('Strides options should be a pair of integers.')
        return efficientnet_model.BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            strides=[int(options['s'][0]),
                     int(options['s'][1])],
            conv_type=int(options['c']) if 'c' in options else 0,
            fused_conv=int(options['f']) if 'f' in options else 0,
            super_pixel=int(options['p']) if 'p' in options else 0,
            condconv=('cc' in block_string))

    def _encode_block_string(self, block):
        """Encodes a block to a string."""
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (block.strides[0], block.strides[1]),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters,
            'c%d' % block.conv_type,
            'f%d' % block.fused_conv,
            'p%d' % block.super_pixel,
        ]
        # se_ratio may be None (decode yields None when 'se' is absent), and
        # 'None > 0' raises TypeError on Python 3 — guard explicitly.
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:  # pylint: disable=g-bool-id-comparison
            args.append('noskip')
        if block.condconv:
            args.append('cc')
        return '_'.join(args)

    def decode(self, string_list):
        """Decodes a list of string notations to specify blocks inside the network.

        Args:
            string_list: a list of strings, each string is a notation of block.

        Returns:
            A list of namedtuples to represent blocks arguments.
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(self._decode_block_string(block_string))
        return blocks_args

    def encode(self, blocks_args):
        """Encodes a list of Blocks to a list of strings.

        Args:
            blocks_args: A list of namedtuples to represent blocks arguments.

        Returns:
            a list of strings, each string is a notation of block.
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(self._encode_block_string(block))
        return block_strings
def swish(features, use_native=True, use_hard=False):
    """Swish activation: x * sigmoid(x), with native and hard variants.

    Three alternatives are provided:
    - Native tf.nn.swish, which uses less memory during training than the
      composable form.
    - A quantization-friendly hard swish.
    - A composable swish, equivalent to tf.nn.swish, but more general for
      finetuning and TF-Hub.

    Args:
        features: A `Tensor` representing preactivation values.
        use_native: whether to use tf.nn.swish (memory-efficient custom gradient).
        use_hard: whether to use the quantization-friendly hard approximation.

    Returns:
        The activation value.

    Raises:
        ValueError: if both use_native and use_hard are requested.
    """
    if use_native and use_hard:
        raise ValueError('Cannot specify both use_native and use_hard.')
    if use_native:
        return tf.nn.swish(features)
    if use_hard:
        # hard-swish: x * relu6(x + 3) / 6
        return features * tf.nn.relu6(features + np.float32(3)) * (1. / 6.)
    # Composable swish with default TensorFlow gradient computation.
    x = tf.convert_to_tensor(features, name='features')
    return x * tf.nn.sigmoid(x)
# Default EfficientNet-B0 architecture, one encoded string per stage; decoded
# by BlockDecoder (r=repeats, k=kernel, s=strides, e=expand ratio,
# i/o=input/output filters, se=squeeze-excite ratio).
_DEFAULT_BLOCKS_ARGS = [
    'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
    'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
    'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
    'r1_k3_s11_e6_i192_o320_se0.25',
]
def efficientnet(width_coefficient=None,
                 depth_coefficient=None,
                 dropout_rate=0.2,
                 survival_prob=0.8):
    """Creates a efficientnet model.

    Args:
        width_coefficient: compound-scaling width coefficient (see
            efficientnet_params).
        depth_coefficient: compound-scaling depth coefficient.
        dropout_rate: dropout rate passed through to GlobalParams.
        survival_prob: stochastic-depth survival probability.

    Returns:
        An efficientnet_model.GlobalParams namedtuple using the default
        B0 block configuration.
    """
    global_params = efficientnet_model.GlobalParams(
        blocks_args=_DEFAULT_BLOCKS_ARGS,
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        survival_prob=survival_prob,
        data_format='channels_last',
        num_classes=1000,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        relu_fn=tf.nn.swish,
        # The default is TPU-specific batch norm.
        # The alternative is tf.layers.BatchNormalization.
        batch_norm=utils.TpuBatchNormalization,  # TPU-specific requirement.
        use_se=True,
        clip_projection_output=False)
    return global_params
def get_model_params(model_name, override_params):
  """Get the block args and global params for a given model.

  Args:
    model_name: string, a predefined 'efficientnet-*' model name.
    override_params: optional dict of GlobalParams fields to override.

  Returns:
    A (blocks_args, global_params) pair.

  Raises:
    NotImplementedError: if `model_name` is not a known efficientnet.
    ValueError: if `override_params` has fields not in GlobalParams.
  """
  if not model_name.startswith('efficientnet'):
    raise NotImplementedError('model name is not pre-defined: %s' % model_name)
  width_coefficient, depth_coefficient, _, dropout_rate = (
      efficientnet_params(model_name))
  global_params = efficientnet(
      width_coefficient, depth_coefficient, dropout_rate)
  if override_params:
    # ValueError will be raised here if override_params has fields not included
    # in global_params.
    global_params = global_params._replace(**override_params)
  blocks_args = BlockDecoder().decode(global_params.blocks_args)
  logging.info('global_params= %s', global_params)
  return blocks_args, global_params
def build_model(images,
                model_name,
                training,
                override_params=None,
                model_dir=None,
                fine_tuning=False,
                features_only=False,
                pooled_features_only=False):
  """A helper function to create a model and return predicted logits.

  Args:
    images: input images tensor.
    model_name: string, the predefined model name.
    training: boolean, whether the model is constructed for training.
    override_params: A dictionary of params for overriding. Fields must exist in
      efficientnet_model.GlobalParams.
    model_dir: string, optional model dir for saving configs.
    fine_tuning: boolean, whether the model is used for finetuning.
    features_only: build the base feature network only (excluding final
      1x1 conv layer, global pooling, dropout and fc head).
    pooled_features_only: build the base network for features extraction (after
      1x1 conv layer and global pooling, but before dropout and fc head).

  Returns:
    logits: the logits tensor of classes.
    endpoints: the endpoints for each layer.

  Raises:
    When model_name specified an undefined model, raises NotImplementedError.
    When override_params has invalid fields, raises ValueError.
  """
  assert isinstance(images, tf.Tensor)
  # features_only and pooled_features_only are mutually exclusive outputs.
  assert not (features_only and pooled_features_only)
  # For backward compatibility: the old 'drop_connect_rate' knob is the
  # complement of the current 'survival_prob'.
  if override_params and override_params.get('drop_connect_rate', None):
    override_params['survival_prob'] = 1 - override_params['drop_connect_rate']
  if not training or fine_tuning:
    if not override_params:
      override_params = {}
    # Outside TPU training, use the regular (non-TPU) batch norm.
    override_params['batch_norm'] = utils.BatchNormalization
    if fine_tuning:
      # Composable swish keeps the graph restorable/finetunable.
      override_params['relu_fn'] = functools.partial(swish, use_native=False)
  blocks_args, global_params = get_model_params(model_name, override_params)
  if model_dir:
    # Persist the resolved configuration once per model_dir for debugging.
    param_file = os.path.join(model_dir, 'model_params.txt')
    if not tf.gfile.Exists(param_file):
      if not tf.gfile.Exists(model_dir):
        tf.gfile.MakeDirs(model_dir)
      with tf.gfile.GFile(param_file, 'w') as f:
        logging.info('writing to %s', param_file)
        f.write('model_name= %s\n\n' % model_name)
        f.write('global_params= %s\n\n' % str(global_params))
        f.write('blocks_args= %s\n\n' % str(blocks_args))
  with tf.variable_scope(model_name):
    model = efficientnet_model.Model(blocks_args, global_params)
    outputs = model(
        images,
        training=training,
        features_only=features_only,
        pooled_features_only=pooled_features_only)
  # Name the requested output for easy graph lookup.
  if features_only:
    outputs = tf.identity(outputs, 'features')
  elif pooled_features_only:
    outputs = tf.identity(outputs, 'pooled_features')
  else:
    outputs = tf.identity(outputs, 'logits')
  return outputs, model.endpoints
def build_model_base(images, model_name, training, override_params=None):
  """A helper function to create a base model and return its features.

  Args:
    images: input images tensor.
    model_name: string, the predefined model name.
    training: boolean, whether the model is constructed for training.
    override_params: A dictionary of params for overriding. Fields must exist in
      efficientnet_model.GlobalParams.

  Returns:
    features: output of the base feature network (features_only=True).
    endpoints: the endpoints for each layer.

  Raises:
    When model_name specified an undefined model, raises NotImplementedError.
    When override_params has invalid fields, raises ValueError.
  """
  assert isinstance(images, tf.Tensor)
  # For backward compatibility: map old 'drop_connect_rate' to 'survival_prob'.
  if override_params and override_params.get('drop_connect_rate', None):
    override_params['survival_prob'] = 1 - override_params['drop_connect_rate']
  blocks_args, global_params = get_model_params(model_name, override_params)
  with tf.variable_scope(model_name):
    model = efficientnet_model.Model(blocks_args, global_params)
    features = model(images, training=training, features_only=True)
  features = tf.identity(features, 'features')
  return features, model.endpoints
| 11,804 | 34.772727 | 80 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/efficientnet_model.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
from absl import logging
import numpy as np
import six
from six.moves import xrange
import tensorflow.compat.v1 as tf
import utils
# from condconv import condconv_layers
# Network-wide hyper-parameters shared by every block (scaling coefficients,
# batch-norm settings, head configuration, ...). All fields default to None so
# callers only set what they need; see efficientnet_builder for typical values.
GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'data_format',
    'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor',
    'min_depth', 'survival_prob', 'relu_fn', 'batch_norm', 'use_se',
    'local_pooling', 'condconv_num_experts', 'clip_projection_output',
    'blocks_args'
])
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
# Per-block hyper-parameters, typically decoded from the compact block-spec
# strings (see BlockDecoder in the builder). All fields default to None.
BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'strides', 'se_ratio', 'conv_type', 'fused_conv',
    'super_pixel', 'condconv'
])
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
def conv_kernel_initializer(shape, dtype=None, partition_info=None):
  """Fan-out normal initialization for convolutional kernels.

  The main difference with tf.variance_scaling_initializer is that
  tf.variance_scaling_initializer uses a truncated normal with an uncorrected
  standard deviation, whereas here we use a plain normal distribution.
  Similarly, tf.initializers.variance_scaling uses a truncated normal with
  a corrected standard deviation.

  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info  # unused
  k_height, k_width, _, out_channels = shape
  fan_out = int(k_height * k_width * out_channels)
  stddev = np.sqrt(2.0 / fan_out)
  return tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=dtype)
def dense_kernel_initializer(shape, dtype=None, partition_info=None):
  """Uniform fan-out initialization for dense kernels.

  This initialization is equal to
    tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
                                    distribution='uniform')
  and is written out explicitly here for clarity.

  Args:
    shape: shape of variable
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info  # unused
  limit = 1.0 / np.sqrt(shape[1])
  return tf.random_uniform(shape, -limit, limit, dtype=dtype)
def superpixel_kernel_initializer(shape, dtype='float32', partition_info=None):
  """Initializes superpixel kernels.

  This is inspired by the space-to-depth transformation that is mathematically
  equivalent before and after the transformation, but implemented as a 2x2
  stride-2 convolution so the layer is trainable. Initializing it this way
  makes the layer an exact space-to-depth at the start of training, so the
  model can learn to keep it mathematically equivalent or improve on it.

  Args:
    shape: shape of variable (only the input-depth axis, shape[-2], is used).
    dtype: dtype of variable
    partition_info: unused

  Returns:
    an initialization for the variable
  """
  del partition_info  # unused
  depth = shape[-2]
  kernel = np.zeros([2, 2, depth, 4 * depth], dtype=dtype)
  # Route input channel k at spatial offset (i, j) to output channel
  # 4*k + 2*i + j, i.e. an exact space-to-depth permutation.
  for i in range(2):
    for j in range(2):
      for k in range(depth):
        kernel[i, j, k, 4 * k + 2 * i + j] = 1
  return kernel
def round_filters(filters, global_params):
  """Scales a filter count by the width multiplier and rounds to the divisor.

  Args:
    filters: int, the unscaled number of filters.
    global_params: GlobalParams with width_coefficient, depth_divisor and
      min_depth.

  Returns:
    int, the scaled filter count, a multiple of the divisor and never more
    than 10% below the scaled value.
  """
  multiplier = global_params.width_coefficient
  if not multiplier:
    return filters
  orig_f = filters
  divisor = global_params.depth_divisor
  floor = global_params.min_depth or divisor
  scaled = filters * multiplier
  rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
  # Make sure that rounding down does not go down by more than 10%.
  if rounded < 0.9 * scaled:
    rounded += divisor
  logging.info('round_filter input=%s output=%s', orig_f, rounded)
  return int(rounded)
def round_repeats(repeats, global_params):
  """Scales a block repeat count by the depth multiplier, rounding up.

  Args:
    repeats: int, the unscaled number of repeats.
    global_params: GlobalParams with depth_coefficient.

  Returns:
    int, ceil(depth_coefficient * repeats), or `repeats` unchanged when no
    multiplier is set.
  """
  multiplier = global_params.depth_coefficient
  return int(math.ceil(multiplier * repeats)) if multiplier else repeats
class MBConvBlock(tf.keras.layers.Layer):
  """A class of MBConv: Mobile Inverted Residual Bottleneck.

  Expansion (1x1) -> depthwise conv -> optional squeeze-and-excite ->
  projection (1x1), with an optional residual/drop-connect skip.

  Attributes:
    endpoints: dict. A list of internal tensors.
  """

  def __init__(self, block_args, global_params):
    """Initializes a MBConv block.

    Args:
      block_args: BlockArgs, arguments to create a Block.
      global_params: GlobalParams, a set of global parameters.
    """
    super(MBConvBlock, self).__init__()
    self._block_args = block_args
    self._batch_norm_momentum = global_params.batch_norm_momentum
    self._batch_norm_epsilon = global_params.batch_norm_epsilon
    self._batch_norm = global_params.batch_norm
    self._condconv_num_experts = global_params.condconv_num_experts
    self._data_format = global_params.data_format
    if self._data_format == 'channels_first':
      self._channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      self._channel_axis = -1
      self._spatial_dims = [1, 2]
    self._relu_fn = global_params.relu_fn or tf.nn.swish
    # Squeeze-and-excite only for a valid ratio in (0, 1].
    self._has_se = (
        global_params.use_se and self._block_args.se_ratio is not None and
        0 < self._block_args.se_ratio <= 1)
    self._clip_projection_output = global_params.clip_projection_output
    self.endpoints = None
    self.conv_cls = tf.layers.Conv2D
    self.depthwise_conv_cls = utils.DepthwiseConv2D
    if self._block_args.condconv:
      # NOTE(review): the `condconv_layers` import is commented out at the top
      # of this file, so enabling condconv would raise NameError here — verify
      # before using condconv blocks.
      self.conv_cls = functools.partial(
          condconv_layers.CondConv2D, num_experts=self._condconv_num_experts)
      self.depthwise_conv_cls = functools.partial(
          condconv_layers.DepthwiseCondConv2D,
          num_experts=self._condconv_num_experts)
    # Builds the block according to arguments.
    self._build()

  def block_args(self):
    """Returns the BlockArgs used to construct this block."""
    return self._block_args

  def _build(self):
    """Builds block according to the arguments."""
    if self._block_args.super_pixel == 1:
      # Trainable space-to-depth layer (2x2 stride-2 conv); see
      # superpixel_kernel_initializer for the default initialization elsewhere.
      self._superpixel = tf.layers.Conv2D(
          self._block_args.input_filters,
          kernel_size=[2, 2],
          strides=[2, 2],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=False)
      self._bnsp = self._batch_norm(
          axis=self._channel_axis,
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon)
    if self._block_args.condconv:
      # Add the example-dependent routing function
      self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
          data_format=self._data_format)
      self._routing_fn = tf.layers.Dense(
          self._condconv_num_experts, activation=tf.nn.sigmoid)
    filters = self._block_args.input_filters * self._block_args.expand_ratio
    kernel_size = self._block_args.kernel_size
    # Fused expansion phase. Called if using fused convolutions.
    self._fused_conv = self.conv_cls(
        filters=filters,
        kernel_size=[kernel_size, kernel_size],
        strides=self._block_args.strides,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    # Expansion phase. Called if not using fused convolutions and expansion
    # phase is necessary.
    self._expand_conv = self.conv_cls(
        filters=filters,
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    self._bn0 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)
    # Depth-wise convolution phase. Called if not using fused convolutions.
    self._depthwise_conv = self.depthwise_conv_cls(
        kernel_size=[kernel_size, kernel_size],
        strides=self._block_args.strides,
        depthwise_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    self._bn1 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)
    if self._has_se:
      num_reduced_filters = max(
          1, int(self._block_args.input_filters * self._block_args.se_ratio))
      # Squeeze and Excitation layer.
      self._se_reduce = tf.layers.Conv2D(
          num_reduced_filters,
          kernel_size=[1, 1],
          strides=[1, 1],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=True)
      self._se_expand = tf.layers.Conv2D(
          filters,
          kernel_size=[1, 1],
          strides=[1, 1],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=True)
    # Output phase.
    filters = self._block_args.output_filters
    self._project_conv = self.conv_cls(
        filters=filters,
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False)
    self._bn2 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)

  def _call_se(self, input_tensor):
    """Call Squeeze and Excitation layer.

    Args:
      input_tensor: Tensor, a single input tensor for Squeeze/Excitation layer.

    Returns:
      A output tensor, which should have the same shape as input.
    """
    # Global spatial average -> reduce -> nonlinearity -> expand, producing a
    # per-channel sigmoid gate that rescales the input.
    se_tensor = tf.reduce_mean(input_tensor, self._spatial_dims, keepdims=True)
    se_tensor = self._se_expand(self._relu_fn(self._se_reduce(se_tensor)))
    logging.info('Built Squeeze and Excitation with tensor shape: %s',
                 (se_tensor.shape))
    return tf.sigmoid(se_tensor) * input_tensor

  def call(self, inputs, training=True, survival_prob=None):
    """Implementation of call().

    Args:
      inputs: the inputs tensor.
      training: boolean, whether the model is constructed for training.
      survival_prob: float, between 0 to 1, drop connect rate.

    Returns:
      A output tensor.
    """
    logging.info('Block input: %s shape: %s', inputs.name, inputs.shape)
    logging.info('Block input depth: %s output depth: %s',
                 self._block_args.input_filters,
                 self._block_args.output_filters)
    x = inputs
    fused_conv_fn = self._fused_conv
    expand_conv_fn = self._expand_conv
    depthwise_conv_fn = self._depthwise_conv
    project_conv_fn = self._project_conv
    if self._block_args.condconv:
      pooled_inputs = self._avg_pooling(inputs)
      routing_weights = self._routing_fn(pooled_inputs)
      # Capture routing weights as additional input to CondConv layers
      fused_conv_fn = functools.partial(
          self._fused_conv, routing_weights=routing_weights)
      expand_conv_fn = functools.partial(
          self._expand_conv, routing_weights=routing_weights)
      depthwise_conv_fn = functools.partial(
          self._depthwise_conv, routing_weights=routing_weights)
      project_conv_fn = functools.partial(
          self._project_conv, routing_weights=routing_weights)
    # creates conv 2x2 kernel
    if self._block_args.super_pixel == 1:
      with tf.variable_scope('super_pixel'):
        x = self._relu_fn(
            self._bnsp(self._superpixel(x), training=training))
      logging.info(
          'Block start with SuperPixel: %s shape: %s', x.name, x.shape)
    if self._block_args.fused_conv:
      # If use fused mbconv, skip expansion and use regular conv.
      x = self._relu_fn(self._bn1(fused_conv_fn(x), training=training))
      logging.info('Conv2D: %s shape: %s', x.name, x.shape)
    else:
      # Otherwise, first apply expansion and then apply depthwise conv.
      if self._block_args.expand_ratio != 1:
        x = self._relu_fn(self._bn0(expand_conv_fn(x), training=training))
        logging.info('Expand: %s shape: %s', x.name, x.shape)
      x = self._relu_fn(self._bn1(depthwise_conv_fn(x), training=training))
      logging.info('DWConv: %s shape: %s', x.name, x.shape)
    if self._has_se:
      with tf.variable_scope('se'):
        x = self._call_se(x)
    self.endpoints = {'expansion_output': x}
    x = self._bn2(project_conv_fn(x), training=training)
    # Add identity so that quantization-aware training can insert quantization
    # ops correctly.
    x = tf.identity(x)
    if self._clip_projection_output:
      x = tf.clip_by_value(x, -6, 6)
    if self._block_args.id_skip:
      if all(
          s == 1 for s in self._block_args.strides
      ) and self._block_args.input_filters == self._block_args.output_filters:
        # Apply only if skip connection presents.
        if survival_prob:
          x = utils.drop_connect(x, training, survival_prob)
        x = tf.add(x, inputs)
    logging.info('Project: %s shape: %s', x.name, x.shape)
    return x
class MBConvBlockWithoutDepthwise(MBConvBlock):
  """MBConv-like block without depthwise convolution and squeeze-and-excite.

  Expansion uses a 3x3 conv (instead of 1x1 + depthwise) and the block stride
  is applied in the 1x1 projection conv.
  """

  def _build(self):
    """Builds block according to the arguments."""
    filters = self._block_args.input_filters * self._block_args.expand_ratio
    if self._block_args.expand_ratio != 1:
      # Expansion phase:
      self._expand_conv = tf.layers.Conv2D(
          filters,
          kernel_size=[3, 3],
          strides=[1, 1],
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          use_bias=False)
      self._bn0 = self._batch_norm(
          axis=self._channel_axis,
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon)
    # Output phase:
    filters = self._block_args.output_filters
    self._project_conv = tf.layers.Conv2D(
        filters,
        kernel_size=[1, 1],
        strides=self._block_args.strides,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        use_bias=False)
    self._bn1 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon)

  def call(self, inputs, training=True, survival_prob=None):
    """Implementation of call().

    Args:
      inputs: the inputs tensor.
      training: boolean, whether the model is constructed for training.
      survival_prob: float, between 0 to 1, drop connect rate.

    Returns:
      A output tensor.
    """
    logging.info('Block input: %s shape: %s', inputs.name, inputs.shape)
    if self._block_args.expand_ratio != 1:
      x = self._relu_fn(self._bn0(self._expand_conv(inputs), training=training))
    else:
      x = inputs
    logging.info('Expand: %s shape: %s', x.name, x.shape)
    self.endpoints = {'expansion_output': x}
    x = self._bn1(self._project_conv(x), training=training)
    # Add identity so that quantization-aware training can insert quantization
    # ops correctly.
    x = tf.identity(x)
    if self._clip_projection_output:
      x = tf.clip_by_value(x, -6, 6)
    if self._block_args.id_skip:
      if all(
          s == 1 for s in self._block_args.strides
      ) and self._block_args.input_filters == self._block_args.output_filters:
        # Apply only if skip connection presents.
        if survival_prob:
          x = utils.drop_connect(x, training, survival_prob)
        x = tf.add(x, inputs)
    logging.info('Project: %s shape: %s', x.name, x.shape)
    return x
class Model(tf.keras.Model):
  """A class implements tf.keras.Model for MNAS-like model.

  Stem conv -> sequence of MBConv blocks -> head conv -> pooling -> fc.

  Reference: https://arxiv.org/abs/1807.11626
  """

  def __init__(self, blocks_args=None, global_params=None):
    """Initializes an `Model` instance.

    Args:
      blocks_args: A list of BlockArgs to construct block modules.
      global_params: GlobalParams, a set of global parameters.

    Raises:
      ValueError: when blocks_args is not specified as a list.
    """
    super(Model, self).__init__()
    if not isinstance(blocks_args, list):
      raise ValueError('blocks_args should be a list.')
    self._global_params = global_params
    self._blocks_args = blocks_args
    self._relu_fn = global_params.relu_fn or tf.nn.swish
    self._batch_norm = global_params.batch_norm
    self.endpoints = None
    self._build()

  def _get_conv_block(self, conv_type):
    # conv_type from the decoded block spec: 0 -> standard MBConv,
    # 1 -> variant without depthwise conv / SE.
    conv_block_map = {0: MBConvBlock, 1: MBConvBlockWithoutDepthwise}
    return conv_block_map[conv_type]

  def _build(self):
    """Builds a model."""
    self._blocks = []
    batch_norm_momentum = self._global_params.batch_norm_momentum
    batch_norm_epsilon = self._global_params.batch_norm_epsilon
    if self._global_params.data_format == 'channels_first':
      channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      channel_axis = -1
      self._spatial_dims = [1, 2]
    # Stem part.
    self._conv_stem = tf.layers.Conv2D(
        filters=round_filters(32, self._global_params),
        kernel_size=[3, 3],
        strides=[2, 2],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._global_params.data_format,
        use_bias=False)
    self._bn0 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon)
    # Builds blocks.
    for block_args in self._blocks_args:
      assert block_args.num_repeat > 0
      assert block_args.super_pixel in [0, 1, 2]
      # Update block input and output filters based on depth multiplier.
      input_filters = round_filters(block_args.input_filters,
                                    self._global_params)
      output_filters = round_filters(block_args.output_filters,
                                     self._global_params)
      kernel_size = block_args.kernel_size
      block_args = block_args._replace(
          input_filters=input_filters,
          output_filters=output_filters,
          num_repeat=round_repeats(block_args.num_repeat, self._global_params))
      # The first block needs to take care of stride and filter size increase.
      conv_block = self._get_conv_block(block_args.conv_type)
      if not block_args.super_pixel:  # no super_pixel at all
        self._blocks.append(conv_block(block_args, self._global_params))
      else:
        # if superpixel, adjust filters, kernels, and strides. A space-to-depth
        # step multiplies channel count by 4 / (stride_h * stride_w).
        depth_factor = int(4 / block_args.strides[0] / block_args.strides[1])
        block_args = block_args._replace(
            input_filters=block_args.input_filters * depth_factor,
            output_filters=block_args.output_filters * depth_factor,
            kernel_size=((block_args.kernel_size + 1) // 2 if depth_factor > 1
                         else block_args.kernel_size))
        # if the first block has stride-2 and super_pixel transformation
        if (block_args.strides[0] == 2 and block_args.strides[1] == 2):
          block_args = block_args._replace(strides=[1, 1])
          self._blocks.append(conv_block(block_args, self._global_params))
          block_args = block_args._replace(  # sp stops at stride-2
              super_pixel=0,
              input_filters=input_filters,
              output_filters=output_filters,
              kernel_size=kernel_size)
        elif block_args.super_pixel == 1:
          self._blocks.append(conv_block(block_args, self._global_params))
          block_args = block_args._replace(super_pixel=2)
        else:
          self._blocks.append(conv_block(block_args, self._global_params))
      if block_args.num_repeat > 1:  # rest of blocks with the same block_arg
        # pylint: disable=protected-access
        block_args = block_args._replace(
            input_filters=block_args.output_filters, strides=[1, 1])
        # pylint: enable=protected-access
      for _ in xrange(block_args.num_repeat - 1):
        self._blocks.append(conv_block(block_args, self._global_params))
    # Head part.
    self._conv_head = tf.layers.Conv2D(
        filters=round_filters(1280, self._global_params),
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        use_bias=False)
    self._bn1 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon)
    self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
        data_format=self._global_params.data_format)
    if self._global_params.num_classes:
      self._fc = tf.layers.Dense(
          self._global_params.num_classes,
          kernel_initializer=dense_kernel_initializer)
    else:
      self._fc = None
    if self._global_params.dropout_rate > 0:
      self._dropout = tf.keras.layers.Dropout(self._global_params.dropout_rate)
    else:
      self._dropout = None

  def call(self,
           inputs,
           training=True,
           features_only=None,
           pooled_features_only=False):
    """Implementation of call().

    Args:
      inputs: input tensors.
      training: boolean, whether the model is constructed for training.
      features_only: build the base feature network only.
      pooled_features_only: build the base network for features extraction
        (after 1x1 conv layer and global pooling, but before dropout and fc
        head).

    Returns:
      output tensors.
    """
    outputs = None
    self.endpoints = {}
    reduction_idx = 0
    # Calls Stem layers
    with tf.variable_scope('stem'):
      outputs = self._relu_fn(
          self._bn0(self._conv_stem(inputs), training=training))
    logging.info('Built stem layers with output shape: %s', outputs.shape)
    self.endpoints['stem'] = outputs
    # Calls blocks.
    for idx, block in enumerate(self._blocks):
      is_reduction = False  # reduction flag for blocks after the stem layer
      # If the first block has super-pixel (space-to-depth) layer, then stem is
      # the first reduction point.
      if (block.block_args().super_pixel == 1 and idx == 0):
        reduction_idx += 1
        self.endpoints['reduction_%s' % reduction_idx] = outputs
      elif ((idx == len(self._blocks) - 1) or
            self._blocks[idx + 1].block_args().strides[0] > 1):
        is_reduction = True
        reduction_idx += 1
      with tf.variable_scope('blocks_%s' % idx):
        survival_prob = self._global_params.survival_prob
        if survival_prob:
          # Linearly decay survival probability with block depth
          # (stochastic depth).
          drop_rate = 1.0 - survival_prob
          survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
          logging.info('block_%s survival_prob: %s', idx, survival_prob)
        outputs = block.call(
            outputs, training=training, survival_prob=survival_prob)
        self.endpoints['block_%s' % idx] = outputs
        if is_reduction:
          self.endpoints['reduction_%s' % reduction_idx] = outputs
        if block.endpoints:
          for k, v in six.iteritems(block.endpoints):
            self.endpoints['block_%s/%s' % (idx, k)] = v
            if is_reduction:
              self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v
    self.endpoints['features'] = outputs
    if not features_only:
      # Calls final layers and returns logits.
      with tf.variable_scope('head'):
        outputs = self._relu_fn(
            self._bn1(self._conv_head(outputs), training=training))
        self.endpoints['head_1x1'] = outputs
        if self._global_params.local_pooling:
          # Fixed-size average pool over the full spatial extent (TPU-friendly
          # alternative to GlobalAveragePooling2D); keeps spatial dims of 1.
          shape = outputs.get_shape().as_list()
          kernel_size = [
              1, shape[self._spatial_dims[0]], shape[self._spatial_dims[1]], 1]
          outputs = tf.nn.avg_pool(
              outputs, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
          self.endpoints['pooled_features'] = outputs
          if not pooled_features_only:
            if self._dropout:
              outputs = self._dropout(outputs, training=training)
            self.endpoints['global_pool'] = outputs
            if self._fc:
              outputs = tf.squeeze(outputs, self._spatial_dims)
              outputs = self._fc(outputs)
            self.endpoints['head'] = outputs
        else:
          outputs = self._avg_pooling(outputs)
          self.endpoints['pooled_features'] = outputs
          if not pooled_features_only:
            if self._dropout:
              outputs = self._dropout(outputs, training=training)
            self.endpoints['global_pool'] = outputs
            if self._fc:
              outputs = self._fc(outputs)
            self.endpoints['head'] = outputs
    return outputs
| 26,027 | 35.453782 | 80 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/preprocessing.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow.compat.v1 as tf
# Default square input resolution in pixels.
IMAGE_SIZE = 224
# Margin used when center-cropping: the crop covers the central
# image_size / (image_size + CROP_PADDING) fraction of the shorter side.
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates cropped_image using one of the bboxes randomly distorted.

  See `tf.image.sample_distorted_bounding_box` for more documentation.

  Args:
    image_bytes: `Tensor` of binary image data.
    bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
      where each coordinate is [0, 1) and the coordinates are arranged
      as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
      image.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
      area of the image must contain at least this fraction of any bounding
      box supplied.
    aspect_ratio_range: An optional list of `float`s. The cropped area of the
      image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `float`s. The cropped area of the image
      must contain a fraction of the supplied image within in this range.
    max_attempts: An optional `int`. Number of attempts at generating a cropped
      region of the image of the specified constraints. After `max_attempts`
      failures, return the entire image.
    scope: Optional `str` for name scope.

  Returns:
    cropped image `Tensor`
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
    jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
    # Sample a crop window; falls back to the whole image after max_attempts.
    crop_begin, crop_size, _ = tf.image.sample_distorted_bounding_box(
        jpeg_shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    # Decode only the sampled window directly from the JPEG bytes.
    offset_y, offset_x, _ = tf.unstack(crop_begin)
    target_height, target_width, _ = tf.unstack(crop_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
def _at_least_x_are_equal(a, b, x):
  """Returns a boolean Tensor: at least `x` elements of `a` and `b` match."""
  matches = tf.cast(tf.equal(a, b), tf.int32)
  return tf.greater_equal(tf.reduce_sum(matches), x)
def _decode_and_random_crop(image_bytes, image_size):
  """Makes a random crop of image_size, falling back to a center crop.

  If the distorted crop degenerated to (almost) the whole image — i.e. at
  least 3 of the shape components equal the original shape — a deterministic
  center crop is used instead.
  """
  whole_image_bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                                 dtype=tf.float32, shape=[1, 1, 4])
  image = distorted_bounding_box_crop(
      image_bytes,
      whole_image_bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=10,
      scope=None)
  original_shape = tf.image.extract_jpeg_shape(image_bytes)
  crop_degenerate = _at_least_x_are_equal(original_shape, tf.shape(image), 3)

  def _fallback_center_crop():
    return _decode_and_center_crop(image_bytes, image_size)

  def _resize_random_crop():
    return tf.image.resize_bicubic([image], [image_size, image_size])[0]

  return tf.cond(crop_degenerate, _fallback_center_crop, _resize_random_crop)
def _decode_and_center_crop(image_bytes, image_size):
  """Crops to the padded center of the image, then resizes to image_size."""
  jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
  height = jpeg_shape[0]
  width = jpeg_shape[1]
  # Crop side length: fraction image_size / (image_size + CROP_PADDING) of the
  # shorter image side, so the resize adds a CROP_PADDING-worth of context.
  scale = image_size / (image_size + CROP_PADDING)
  crop_size = tf.cast(
      scale * tf.cast(tf.minimum(height, width), tf.float32),
      tf.int32)
  offset_height = ((height - crop_size) + 1) // 2
  offset_width = ((width - crop_size) + 1) // 2
  crop_window = tf.stack([offset_height, offset_width, crop_size, crop_size])
  cropped = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
  return tf.image.resize_bicubic([cropped], [image_size, image_size])[0]
def _flip(image):
  """Randomly flips the image horizontally."""
  return tf.image.random_flip_left_right(image)
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE,
                         augment_name=None,
                         randaug_num_layers=None, randaug_magnitude=None):
  """Preprocesses the given image for training.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.
    augment_name: `string` that is the name of the augmentation method
      to apply to the image. `autoaugment` if AutoAugment is to be used or
      `randaugment` if RandAugment is to be used. If the value is `None` no
      augmentation method will be applied. See autoaugment.py for more
      details.
    randaug_num_layers: 'int', if RandAug is used, what should the number of
      layers be. See autoaugment.py for detailed description.
    randaug_magnitude: 'int', if RandAug is used, what should the magnitude
      be. See autoaugment.py for detailed description.

  Returns:
    A preprocessed image `Tensor`.
  """
  # Training pipeline: random crop + random horizontal flip.
  image = _decode_and_random_crop(image_bytes, image_size)
  image = _flip(image)
  image = tf.reshape(image, [image_size, image_size, 3])
  # NOTE(review): the crop helpers return float images (post-bicubic resize),
  # presumably still in [0, 255]; convert_image_dtype does not rescale
  # float-to-float -- confirm against the decode path.
  image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)

  if augment_name:
    try:
      import autoaugment  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      logging.exception('Autoaugment is not supported in TF 2.x.')
      raise e

    logging.info('Apply AutoAugment policy %s', augment_name)
    input_image_type = image.dtype
    # AutoAugment/RandAugment expect uint8 images in [0, 255].
    image = tf.clip_by_value(image, 0.0, 255.0)
    image = tf.cast(image, dtype=tf.uint8)

    if augment_name == 'autoaugment':
      logging.info('Apply AutoAugment policy %s', augment_name)
      image = autoaugment.distort_image_with_autoaugment(image, 'v0')
    elif augment_name == 'randaugment':
      image = autoaugment.distort_image_with_randaugment(
          image, randaug_num_layers, randaug_magnitude)
    else:
      raise ValueError('Invalid value for augment_name: %s' % (augment_name))

    # Restore the caller-visible dtype (bfloat16 or float32).
    image = tf.cast(image, dtype=input_image_type)
  return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
  """Preprocesses the given image for evaluation.

  Deterministic pipeline: center crop, reshape, dtype conversion (no random
  augmentation, unlike preprocess_for_train).

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.

  Returns:
    A preprocessed image `Tensor`.
  """
  image = _decode_and_center_crop(image_bytes, image_size)
  image = tf.reshape(image, [image_size, image_size, 3])
  image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
  return image
def preprocess_image(image_bytes,
                     is_training=False,
                     use_bfloat16=False,
                     image_size=IMAGE_SIZE,
                     augment_name=None,
                     randaug_num_layers=None,
                     randaug_magnitude=None):
  """Preprocesses the given image.

  Dispatches to the train pipeline (random crop/flip, optional AutoAugment/
  RandAugment) or the eval pipeline (center crop) based on `is_training`.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    is_training: `bool` for whether the preprocessing is for training.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.
    augment_name: `string` that is the name of the augmentation method
      to apply to the image. `autoaugment` if AutoAugment is to be used or
      `randaugment` if RandAugment is to be used. If the value is `None` no
      augmentation method will be applied. See autoaugment.py for more
      details.
    randaug_num_layers: 'int', if RandAug is used, what should the number of
      layers be. See autoaugment.py for detailed description.
    randaug_magnitude: 'int', if RandAug is used, what should the magnitude
      be. See autoaugment.py for detailed description.

  Returns:
    A preprocessed image `Tensor` with value range of [0, 255].
  """
  if is_training:
    return preprocess_for_train(
        image_bytes, use_bfloat16, image_size, augment_name,
        randaug_num_layers, randaug_magnitude)
  else:
    return preprocess_for_eval(image_bytes, use_bfloat16, image_size)
| 9,508 | 38.293388 | 80 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import
def build_learning_rate(initial_lr,
                        global_step,
                        steps_per_epoch=None,
                        lr_decay_type='exponential',
                        decay_factor=0.97,
                        decay_epochs=2.4,
                        total_steps=None,
                        warmup_epochs=5):
  """Build learning rate.

  Args:
    initial_lr: float, peak learning rate reached after warmup.
    global_step: scalar int `Tensor`, current training step.
    steps_per_epoch: int; required for 'exponential' decay and for warmup.
    lr_decay_type: one of 'exponential', 'cosine', 'constant'.
    decay_factor: multiplicative decay applied every `decay_epochs`
      ('exponential' only).
    decay_epochs: epochs between decay applications ('exponential' only).
    total_steps: int; required for 'cosine' decay.
    warmup_epochs: epochs of linear warmup from 0 to `initial_lr`;
      falsy disables warmup.

  Returns:
    A scalar learning-rate `Tensor` (or a Python float for 'constant'
    with no warmup).
  """
  if lr_decay_type == 'exponential':
    assert steps_per_epoch is not None
    decay_steps = steps_per_epoch * decay_epochs
    lr = tf.train.exponential_decay(
        initial_lr, global_step, decay_steps, decay_factor, staircase=True)
  elif lr_decay_type == 'cosine':
    assert total_steps is not None
    # Cosine schedule: initial_lr at step 0, 0 at total_steps.
    lr = 0.5 * initial_lr * (
        1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))
  elif lr_decay_type == 'constant':
    lr = initial_lr
  else:
    assert False, 'Unknown lr_decay_type : %s' % lr_decay_type

  if warmup_epochs:
    logging.info('Learning rate warmup_epochs: %d', warmup_epochs)
    # NOTE(review): this path dereferences steps_per_epoch even for
    # 'cosine'/'constant' decay -- callers must pass it whenever warmup
    # is enabled, or this raises TypeError. Confirm call sites.
    warmup_steps = int(warmup_epochs * steps_per_epoch)
    warmup_lr = (
        initial_lr * tf.cast(global_step, tf.float32) / tf.cast(
            warmup_steps, tf.float32))
    lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)

  return lr
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  """Build optimizer.

  Args:
    learning_rate: scalar learning rate (float or `Tensor` schedule).
    optimizer_name: one of 'sgd', 'momentum', 'rmsprop'.
    decay: RMSProp decay ('rmsprop' only).
    epsilon: RMSProp epsilon ('rmsprop' only).
    momentum: momentum term ('momentum' and 'rmsprop').

  Returns:
    A `tf.train.Optimizer` instance.

  Raises:
    ValueError: if `optimizer_name` is not recognized.
  """
  if optimizer_name == 'sgd':
    logging.info('Using SGD optimizer')
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  elif optimizer_name == 'momentum':
    logging.info('Using Momentum optimizer')
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  elif optimizer_name == 'rmsprop':
    logging.info('Using RMSProp optimizer')
    optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
                                          epsilon)
  else:
    # BUG FIX: logging.fatal() does not stop execution, so the original fell
    # through to `return optimizer` with `optimizer` unbound, crashing with
    # UnboundLocalError. Raise a descriptive error instead.
    logging.fatal('Unknown optimizer: %s', optimizer_name)
    raise ValueError('Unknown optimizer: %s' % optimizer_name)

  return optimizer
class TpuBatchNormalization(tf.layers.BatchNormalization):
  """Cross replica batch normalization.

  Overrides `_moments` so batch statistics are averaged across groups of
  TPU replicas, giving an effectively larger batch for normalization.
  """

  def __init__(self, fused=False, **kwargs):
    # Fused batch norm computes moments inside the fused kernel, which would
    # bypass the cross-replica averaging below -- so it is disallowed.
    if fused in (True, None):
      raise ValueError('TpuBatchNormalization does not support fused=True.')
    super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)

  def _cross_replica_average(self, t, num_shards_per_group):
    """Calculates the average value of input tensor across TPU replicas."""
    num_shards = tpu_function.get_tpu_context().number_of_shards
    group_assignment = None
    if num_shards_per_group > 1:
      if num_shards % num_shards_per_group != 0:
        raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                         % (num_shards, num_shards_per_group))
      num_groups = num_shards // num_shards_per_group
      # group_assignment[y] lists the replica ids belonging to group y;
      # cross_replica_sum then reduces within each group only.
      group_assignment = [[
          x for x in range(num_shards) if x // num_shards_per_group == y
      ] for y in range(num_groups)]
    return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
        num_shards_per_group, t.dtype)

  def _moments(self, inputs, reduction_axes, keep_dims):
    """Compute the mean and variance: it overrides the original _moments."""
    shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    num_shards = tpu_function.get_tpu_context().number_of_shards or 1
    if num_shards <= 8:  # Skip cross_replica for 2x2 or smaller slices.
      num_shards_per_group = 1
    else:
      num_shards_per_group = max(8, num_shards // 8)
    logging.info('TpuBatchNormalization with num_shards_per_group %s',
                 num_shards_per_group)
    if num_shards_per_group > 1:
      # Compute variance using: Var[X]= E[X^2] - E[X]^2.
      shard_square_of_mean = tf.math.square(shard_mean)
      shard_mean_of_square = shard_variance + shard_square_of_mean
      group_mean = self._cross_replica_average(
          shard_mean, num_shards_per_group)
      group_mean_of_square = self._cross_replica_average(
          shard_mean_of_square, num_shards_per_group)
      group_variance = group_mean_of_square - tf.math.square(group_mean)
      return (group_mean, group_variance)
    else:
      return (shard_mean, shard_variance)
class BatchNormalization(tf.layers.BatchNormalization):
  """Fixed default name of BatchNormalization to match TpuBatchNormalization."""

  def __init__(self, name='tpu_batch_normalization', **kwargs):
    # Same default scope name as TpuBatchNormalization so checkpoints are
    # interchangeable between the TPU and non-TPU variants.
    super(BatchNormalization, self).__init__(name=name, **kwargs)
def drop_connect(inputs, is_training, survival_prob):
  """Drop the entire conv with given survival probability.

  Args:
    inputs: 4-D input `Tensor` (batched conv output).
    is_training: bool; when False, inputs pass through unchanged.
    survival_prob: float in (0, 1], probability of keeping the branch.

  Returns:
    `inputs` scaled/zeroed per example (training) or unchanged (inference).
  """
  # "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
  if not is_training:
    return inputs

  # Compute tensor.
  batch_size = tf.shape(inputs)[0]
  random_tensor = survival_prob
  # random_tensor is uniform in [survival_prob, 1 + survival_prob); flooring
  # yields 1 with probability survival_prob, else 0 -- one draw per example.
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  binary_tensor = tf.floor(random_tensor)
  # Unlike conventional way that multiply survival_prob at test time, here we
  # divide survival_prob at training time, such that no addition compute is
  # needed at test time.
  output = tf.div(inputs, survival_prob) * binary_tensor
  return output
def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
  """Archive a checkpoint if the metric is better.

  Args:
    ckpt_eval: eval result, written verbatim to `archive/best_eval.txt`.
    ckpt_objective: float objective of this checkpoint (higher is better).
    ckpt_path: path prefix of the checkpoint files to archive.

  Returns:
    True if the checkpoint was archived, False if it was worse than the
    saved best or no checkpoint files were found.
  """
  ckpt_dir, ckpt_name = os.path.split(ckpt_path)

  saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
  saved_objective = float('-inf')
  if tf.gfile.Exists(saved_objective_path):
    with tf.gfile.GFile(saved_objective_path, 'r') as f:
      saved_objective = float(f.read())
  if saved_objective > ckpt_objective:
    logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
    return False

  filenames = tf.gfile.Glob(ckpt_path + '.*')
  # BUG FIX: Glob returns a (possibly empty) list, never None, so the
  # original `if filenames is None` guard could never trigger and a missing
  # checkpoint would silently write bogus archive state. Check emptiness.
  if not filenames:
    logging.info('No files to copy for checkpoint %s', ckpt_path)
    return False

  # Clear the old folder.
  dst_dir = os.path.join(ckpt_dir, 'archive')
  if tf.gfile.Exists(dst_dir):
    tf.gfile.DeleteRecursively(dst_dir)
  tf.gfile.MakeDirs(dst_dir)

  # Write checkpoints.
  for f in filenames:
    dest = os.path.join(dst_dir, os.path.basename(f))
    tf.gfile.Copy(f, dest, overwrite=True)
  ckpt_state = tf.train.generate_checkpoint_state_proto(
      dst_dir,
      model_checkpoint_path=ckpt_name,
      all_model_checkpoint_paths=[ckpt_name])
  with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
    f.write(str(ckpt_state))
  with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
    f.write('%s' % ckpt_eval)

  # Update the best objective.
  with tf.gfile.GFile(saved_objective_path, 'w') as f:
    f.write('%f' % ckpt_objective)

  logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
  return True
def get_ema_vars():
  """Collect every variable that carries an exponential-moving-average copy."""
  candidates = tf.trainable_variables() + tf.get_collection('moving_vars')
  # We maintain mva for batch norm moving mean and variance as well.
  bn_stats = [
      v for v in tf.global_variables()
      if 'moving_mean' in v.name or 'moving_variance' in v.name
  ]
  # De-duplicate: a variable may appear in several of the lists above.
  return list(set(candidates + bn_stats))
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
  """Wrap keras DepthwiseConv2D to tf.layers."""
  # Mixing in tf.layers.Layer lets the Keras layer be used wherever the
  # tf.compat.v1 layers interface is expected; no behavior is added.
  pass
class EvalCkptDriver(object):
  """A driver for running eval inference.

  Subclasses must implement `build_model` and `get_preprocess_fn`.

  Attributes:
    model_name: str. Model name to eval.
    batch_size: int. Eval batch size.
    image_size: int. Input image size, determined by model name.
    num_classes: int. Number of classes, default to 1000 for ImageNet.
    include_background_label: whether to include extra background label.
  """

  def __init__(self,
               model_name,
               batch_size=1,
               image_size=224,
               num_classes=1000,
               include_background_label=False):
    """Initialize internal variables."""
    self.model_name = model_name
    self.batch_size = batch_size
    self.num_classes = num_classes
    self.include_background_label = include_background_label
    self.image_size = image_size

  def restore_model(self, sess, ckpt_dir, enable_ema=True, export_ckpt=None):
    """Restore variables from checkpoint dir.

    Args:
      sess: tf Session used to run the restore/assign ops.
      ckpt_dir: directory holding the checkpoint to load.
      enable_ema: if True, restore the exponential-moving-average shadow
        values into the variables instead of the raw trained values.
      export_ckpt: optional path; if set, the restored variables are
        re-saved as a fresh checkpoint.
    """
    sess.run(tf.global_variables_initializer())
    checkpoint = tf.train.latest_checkpoint(ckpt_dir)
    if enable_ema:
      # decay=0.0 so that ema.apply() below overwrites the shadow values
      # with the current variable values when exporting.
      ema = tf.train.ExponentialMovingAverage(decay=0.0)
      ema_vars = get_ema_vars()
      var_dict = ema.variables_to_restore(ema_vars)
      ema_assign_op = ema.apply(ema_vars)
    else:
      var_dict = get_ema_vars()
      ema_assign_op = None

    tf.train.get_or_create_global_step()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(var_dict, max_to_keep=1)
    saver.restore(sess, checkpoint)

    if export_ckpt:
      if ema_assign_op is not None:
        sess.run(ema_assign_op)
      saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
      saver.save(sess, export_ckpt)

  def build_model(self, features, is_training):
    """Build model with input features."""
    del features, is_training
    raise ValueError('Must be implemented by subclasses.')

  def get_preprocess_fn(self):
    # Must return a callable (image_bytes, is_training, image_size) -> image.
    raise ValueError('Must be implemented by subclsses.')

  def build_dataset(self, filenames, labels, is_training):
    """Build input dataset.

    Returns:
      (images, labels) tensors produced by a one-shot iterator.
    """
    batch_drop_remainder = False
    if 'condconv' in self.model_name and not is_training:
      # CondConv layers can only be called with known batch dimension. Thus, we
      # must drop all remaining examples that do not make up one full batch.
      # To ensure all examples are evaluated, use a batch size that evenly
      # divides the number of files.
      batch_drop_remainder = True
      num_files = len(filenames)
      if num_files % self.batch_size != 0:
        tf.logging.warn('Remaining examples in last batch are not being '
                        'evaluated.')
    filenames = tf.constant(filenames)
    labels = tf.constant(labels)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))

    def _parse_function(filename, label):
      # Read + preprocess a single image file into a float32 tensor.
      image_string = tf.read_file(filename)
      preprocess_fn = self.get_preprocess_fn()
      image_decoded = preprocess_fn(
          image_string, is_training, image_size=self.image_size)
      image = tf.cast(image_decoded, tf.float32)
      return image, label

    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(self.batch_size,
                            drop_remainder=batch_drop_remainder)

    iterator = dataset.make_one_shot_iterator()
    images, labels = iterator.get_next()
    return images, labels

  def run_inference(self,
                    ckpt_dir,
                    image_files,
                    labels,
                    enable_ema=True,
                    export_ckpt=None):
    """Build and run inference on the target images and labels."""
    # With a background label, model index 0 is "background"; subtract 1 so
    # reported indices match the dataset's class ids.
    label_offset = 1 if self.include_background_label else 0
    with tf.Graph().as_default(), tf.Session() as sess:
      images, labels = self.build_dataset(image_files, labels, False)
      probs = self.build_model(images, is_training=False)
      if isinstance(probs, tuple):
        probs = probs[0]

      self.restore_model(sess, ckpt_dir, enable_ema, export_ckpt)

      prediction_idx = []
      prediction_prob = []
      for _ in range(len(image_files) // self.batch_size):
        out_probs = sess.run(probs)
        # NOTE(review): argsort over the raw output assumes batch_size == 1
        # (probs squeezed to a single vector) -- confirm for larger batches.
        idx = np.argsort(out_probs)[::-1]
        prediction_idx.append(idx[:5] - label_offset)
        prediction_prob.append([out_probs[pid] for pid in idx[:5]])

      # Return the top 5 predictions (idx and prob) for each image.
      return prediction_idx, prediction_prob

  def eval_example_images(self,
                          ckpt_dir,
                          image_files,
                          labels_map_file,
                          enable_ema=True,
                          export_ckpt=None):
    """Eval a list of example images.

    Args:
      ckpt_dir: str. Checkpoint directory path.
      image_files: List[str]. A list of image file paths.
      labels_map_file: str. The labels map file path.
      enable_ema: enable exponential moving average.
      export_ckpt: export ckpt folder.

    Returns:
      A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
      index and pred_prob is the top 5 prediction probability.
    """
    classes = json.loads(tf.gfile.Open(labels_map_file).read())
    # Labels are irrelevant for example inference; pass dummy zeros.
    pred_idx, pred_prob = self.run_inference(
        ckpt_dir, image_files, [0] * len(image_files), enable_ema, export_ckpt)
    for i in range(len(image_files)):
      print('predicted class for image {}: '.format(image_files[i]))
      for j, idx in enumerate(pred_idx[i]):
        print(' -> top_{} ({:4.2f}%): {} '.format(j, pred_prob[i][j] * 100,
                                                  classes[str(idx)]))
    return pred_idx, pred_prob

  def eval_imagenet(self, ckpt_dir, imagenet_eval_glob,
                    imagenet_eval_label, num_images, enable_ema, export_ckpt):
    """Eval ImageNet images and report top1/top5 accuracy.

    Args:
      ckpt_dir: str. Checkpoint directory path.
      imagenet_eval_glob: str. File path glob for all eval images.
      imagenet_eval_label: str. File path for eval label.
      num_images: int. Number of images to eval: -1 means eval the whole
        dataset.
      enable_ema: enable exponential moving average.
      export_ckpt: export checkpoint folder.

    Returns:
      A tuple (top1, top5) for top1 and top5 accuracy.
    """
    imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
    imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
    if num_images < 0:
      num_images = len(imagenet_filenames)
    image_files = imagenet_filenames[:num_images]
    labels = imagenet_val_labels[:num_images]

    pred_idx, _ = self.run_inference(
        ckpt_dir, image_files, labels, enable_ema, export_ckpt)
    top1_cnt, top5_cnt = 0.0, 0.0
    for i, label in enumerate(labels):
      # `label in ...` over the prediction slices counts top-1/top-5 hits.
      top1_cnt += label in pred_idx[i][:1]
      top5_cnt += label in pred_idx[i][:5]
      if i % 100 == 0:
        print('Step {}: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(
            i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
        sys.stdout.flush()
    top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
    print('Final: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(top1, top5))
    return top1, top5
| 15,742 | 37.775862 | 91 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/eval_ckpt_main_tf1.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eval checkpoint driver.
This is an example evaluation script for users to understand the EfficientNet
model checkpoints on CPU. To serve EfficientNet, please consider to export a
`SavedModel` from checkpoints and use tf-serving to serve.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
import efficientnet_builder
import preprocessing
flags.DEFINE_string('model_name', 'efficientnet-b0', 'Model name to eval.')
flags.DEFINE_string('runmode', 'examples', 'Running mode: examples or imagenet')
flags.DEFINE_string('imagenet_eval_glob', None,
'Imagenet eval image glob, '
'such as /imagenet/ILSVRC2012*.JPEG')
flags.DEFINE_string('imagenet_eval_label', None,
'Imagenet eval label file path, '
'such as /imagenet/ILSVRC2012_validation_ground_truth.txt')
flags.DEFINE_string('ckpt_dir', '/tmp/ckpt/', 'Checkpoint folders')
flags.DEFINE_string('example_img', '/tmp/panda.jpg',
'Filepath for a single example image.')
flags.DEFINE_string('labels_map_file', '/tmp/labels_map.txt',
'Labels map from label id to its meaning.')
flags.DEFINE_integer('num_images', 5000,
'Number of images to eval. Use -1 to eval all images.')
FLAGS = flags.FLAGS
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
class EvalCkptDriver(object):
  """A driver for running eval inference.

  Attributes:
    model_name: str. Model name to eval.
    batch_size: int. Eval batch size.
    num_classes: int. Number of classes, default to 1000 for ImageNet.
    image_size: int. Input image size, determined by model name.
  """

  def __init__(self, model_name='efficientnet-b0', batch_size=1):
    """Initialize internal variables."""
    self.model_name = model_name
    self.batch_size = batch_size
    self.num_classes = 1000
    # Model Scaling parameters: the input resolution is a function of the
    # EfficientNet variant name.
    _, _, self.image_size, _ = efficientnet_builder.efficientnet_params(
        model_name)

  def restore_model(self, sess, ckpt_dir):
    """Restore variables from checkpoint dir."""
    checkpoint = tf.train.latest_checkpoint(ckpt_dir)
    ema = tf.train.ExponentialMovingAverage(decay=0.9999)
    # EMA shadow copies exist for trainables, 'moving_vars', and batch-norm
    # moving statistics; restore the shadow values into the variables.
    ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')
    for v in tf.global_variables():
      if 'moving_mean' in v.name or 'moving_variance' in v.name:
        ema_vars.append(v)
    ema_vars = list(set(ema_vars))
    var_dict = ema.variables_to_restore(ema_vars)
    saver = tf.train.Saver(var_dict, max_to_keep=1)
    saver.restore(sess, checkpoint)

  def build_model(self, features, is_training):
    """Build model with input features."""
    # Normalize with ImageNet per-channel mean/stddev (0-255 pixel scale).
    features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
    features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
    logits, _ = efficientnet_builder.build_model(
        features, self.model_name, is_training)
    probs = tf.nn.softmax(logits)
    probs = tf.squeeze(probs)
    return probs

  def build_dataset(self, filenames, labels, is_training):
    """Build input dataset."""
    filenames = tf.constant(filenames)
    labels = tf.constant(labels)
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))

    def _parse_function(filename, label):
      # Read + preprocess one image file into a float32 tensor.
      image_string = tf.read_file(filename)
      image_decoded = preprocessing.preprocess_image(
          image_string, is_training, self.image_size)
      image = tf.cast(image_decoded, tf.float32)
      return image, label

    dataset = dataset.map(_parse_function)
    dataset = dataset.batch(self.batch_size)

    iterator = dataset.make_one_shot_iterator()
    images, labels = iterator.get_next()
    return images, labels

  def run_inference(self, ckpt_dir, image_files, labels):
    """Build and run inference on the target images and labels."""
    with tf.Graph().as_default(), tf.Session() as sess:
      images, labels = self.build_dataset(image_files, labels, False)
      probs = self.build_model(images, is_training=False)

      sess.run(tf.global_variables_initializer())
      self.restore_model(sess, ckpt_dir)

      prediction_idx = []
      prediction_prob = []
      for _ in range(len(image_files) // self.batch_size):
        out_probs = sess.run(probs)
        # NOTE(review): flattened argsort assumes batch_size == 1 (probs is
        # squeezed to a single vector) -- confirm before larger batches.
        idx = np.argsort(out_probs)[::-1]
        prediction_idx.append(idx[:5])
        prediction_prob.append([out_probs[pid] for pid in idx[:5]])

      # Return the top 5 predictions (idx and prob) for each image.
      return prediction_idx, prediction_prob
def eval_example_images(model_name, ckpt_dir, image_files, labels_map_file):
  """Eval a list of example images.

  Args:
    model_name: str. The name of model to eval.
    ckpt_dir: str. Checkpoint directory path.
    image_files: List[str]. A list of image file paths.
    labels_map_file: str. The labels map file path (JSON mapping stringified
      class index to human-readable class name).

  Returns:
    A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
    index and pred_prob is the top 5 prediction probability.
  """
  eval_ckpt_driver = EvalCkptDriver(model_name)
  classes = json.loads(tf.gfile.Open(labels_map_file).read())
  # Ground-truth labels are not needed for plain inference; pass zeros.
  pred_idx, pred_prob = eval_ckpt_driver.run_inference(
      ckpt_dir, image_files, [0] * len(image_files))
  for i in range(len(image_files)):
    print('predicted class for image {}: '.format(image_files[i]))
    for j, idx in enumerate(pred_idx[i]):
      print(' -> top_{} ({:4.2f}%): {} '.format(
          j, pred_prob[i][j] * 100, classes[str(idx)]))
  return pred_idx, pred_prob
def eval_imagenet(model_name,
                  ckpt_dir,
                  imagenet_eval_glob,
                  imagenet_eval_label,
                  num_images):
  """Eval ImageNet images and report top1/top5 accuracy.

  Args:
    model_name: str. The name of model to eval.
    ckpt_dir: str. Checkpoint directory path.
    imagenet_eval_glob: str. File path glob for all eval images.
    imagenet_eval_label: str. File path for eval label (one int per line,
      ordered to match the sorted image glob).
    num_images: int. Number of images to eval: -1 means eval the whole dataset.

  Returns:
    A tuple (top1, top5) for top1 and top5 accuracy.
  """
  eval_ckpt_driver = EvalCkptDriver(model_name)
  imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
  imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
  if num_images < 0:
    num_images = len(imagenet_filenames)
  image_files = imagenet_filenames[:num_images]
  labels = imagenet_val_labels[:num_images]

  pred_idx, _ = eval_ckpt_driver.run_inference(ckpt_dir, image_files, labels)
  top1_cnt, top5_cnt = 0.0, 0.0
  for i, label in enumerate(labels):
    # Membership tests over the prediction slices count top-1/top-5 hits.
    top1_cnt += label in pred_idx[i][:1]
    top5_cnt += label in pred_idx[i][:5]
    if i % 100 == 0:
      print('Step {}: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(
          i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
      sys.stdout.flush()
  top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
  print('Final: top1_acc = {:4.2f}%  top5_acc = {:4.2f}%'.format(top1, top5))
  return top1, top5
def main(unused_argv):
  """Dispatch to example-image or ImageNet evaluation based on --runmode."""
  tf.logging.set_verbosity(tf.logging.ERROR)
  runmode = FLAGS.runmode
  if runmode == 'examples':
    # Run inference for a single example image.
    eval_example_images(FLAGS.model_name, FLAGS.ckpt_dir, [FLAGS.example_img],
                        FLAGS.labels_map_file)
  elif runmode == 'imagenet':
    # Run inference over the ImageNet validation set.
    eval_imagenet(FLAGS.model_name, FLAGS.ckpt_dir, FLAGS.imagenet_eval_glob,
                  FLAGS.imagenet_eval_label, FLAGS.num_images)
  else:
    print('must specify runmode: examples or imagenet')
if __name__ == '__main__':
app.run(main)
| 8,524 | 37.400901 | 80 | py |
EfficientNet-PyTorch | EfficientNet-PyTorch-master/tf_to_pytorch/convert_tf_to_pt/original_tf/__init__.py | 0 | 0 | 0 | py | |
neuron-importance-zsl | neuron-importance-zsl-master/mod2alpha.py | # Code to map from any modality to alphas.
# Train using class_info and alphas from a trained network
import argparse
import numpy as np
import random
random.seed(1234)
from random import shuffle
import pickle
from pprint import pprint
from dotmap import DotMap
import pdb
import csv
import os
import json
import tensorflow as tf
from scipy.io import loadmat
import ntpath
from scipy.stats import spearmanr
import glob
from tqdm import tqdm
import torchfile
import scipy.io as scio
# Fix CUB names due to mismatch in Scott Reed's caption dataset: the image
# folder names drop the possessive "s" that the caption .t7 feature files
# keep (e.g. Clark_Nutcracker -> Clarks_Nutcracker).
CUB_FNAME_FIX = {'093.Clark_Nutcracker': '093.Clarks_Nutcracker',
                 '124.Le_Conte_Sparrow': '124.Le_Contes_Sparrow',
                 '180.Wilson_Warbler': '180.Wilsons_Warbler',
                 '125.Lincoln_Sparrow': '125.Lincolns_Sparrow',
                 '023.Brandt_Cormorant': '023.Brandts_Cormorant',
                 '178.Swainson_Warbler': '178.Swainsons_Warbler',
                 '122.Harris_Sparrow': '122.Harriss_Sparrow',
                 '113.Baird_Sparrow': '113.Bairds_Sparrow',
                 '123.Henslow_Sparrow': '123.Henslows_Sparrow',
                 '098.Scott_Oriole': '098.Scotts_Oriole',
                 '061.Heermann_Gull': '061.Heermanns_Gull',
                 '022.Chuck_will_Widow': '022.Chuck_wills_Widow',
                 '193.Bewick_Wren': '193.Bewicks_Wren',
                 '067.Anna_Hummingbird': '067.Annas_Hummingbird',
                 '126.Nelson_Sharp_tailed_Sparrow': '126.Nelsons_Sparrow',
                 '115.Brewer_Sparrow': '115.Brewers_Sparrow',
                 '009.Brewer_Blackbird': '009.Brewers_Blackbird'}

# Command-line interface: path to the experiment configuration JSON.
parser = argparse.ArgumentParser()
parser.add_argument('--config_json', default='')
def encode_attributes_class(attrdir, imlabelist, config):
    """Load per-class attribute vectors from a whitespace-separated file.

    Each line of ``attrdir`` holds one class's attribute vector. Values equal
    to -1.00 (missing annotations) are mapped to 0. When
    ``config.a2t == "True"`` values are parsed as ints (binary attributes),
    otherwise as floats (continuous attributes).

    Args:
        attrdir: path to the class-attribute matrix text file.
        imlabelist: unused; kept so all modality encoders share a signature.
        config: options object; reads ``supervision`` and ``a2t``.

    Returns:
        (im_attr, cls_attr): ``im_attr`` is always empty (see NOTE below);
        ``cls_attr`` maps class index -> list of attribute values (empty
        unless ``config.supervision == 'class'``).
    """
    im_attr = {}
    cls_attr = {}
    # Only class-level supervision is supported.
    if config.supervision == 'class':
        with open(attrdir) as f:
            for n, line in enumerate(f):
                tokens = [x for x in line.rstrip().split(" ") if x]
                if config.a2t == "True":
                    # Binary attributes: -1.00 marks absent/unknown -> 0.
                    values = [int(x) if float(x) != -1.00 else 0
                              for x in tokens]
                else:
                    values = [float(x) if float(x) != -1.00 else 0
                              for x in tokens]
                cls_attr[n] = values
        # NOTE: the original version also looped over all classes trying to
        # fill per-image attributes, but it iterated over the still-empty
        # ``im_attr`` dict (whose entries, had any existed, also lacked the
        # 'cls' key it filtered on), so it could never populate anything and
        # shadowed the builtin ``id``. That dead loop has been removed;
        # ``im_attr`` is returned empty exactly as before.
    return im_attr, cls_attr
def parse_json(json_file):
    """Load and return the contents of a JSON file."""
    with open(json_file, 'r') as handle:
        return json.load(handle)
def encode_tfidf(tf_file, imlabelist, config):
    """Encode per-image TF-IDF features from Wikipedia class articles.

    NOTE(review): ``tf_file`` is ignored -- the feature-matrix path is
    hard-coded below. Confirm this is intentional.

    NOTE(review): this indexes ``tf_idf[imlabelist[i]-1]``, i.e. it expects
    1-based labels, but the caller (create_splits) appears to pass labels
    already shifted to 0-based -- verify there is no off-by-one here.

    Args:
        tf_file: unused (see note above).
        imlabelist: per-image class labels.
        config: options object (unused here).

    Returns:
        Dict mapping 1-based image index (as str) to {'att': tfidf row list}.
    """
    # Function to encode the TF-IDF features from wikipedia articles
    # Make this compatible with Ram's attribute encoding function
    attrdir = './data/CUB/11083D_TFIDF.mat'
    tf_idf = scio.loadmat(attrdir)['PredicateMatrix']
    im_attr = {}
    print('Encoding TF-IDF....')
    for i in tqdm(range(len(imlabelist))):
        im_attr[str(i+1)] = {}
        im_attr[str(i+1)]['att'] = tf_idf[imlabelist[i]-1].tolist()
    return im_attr
def encode_captions(cap_dir, imlist_new, imlabelist, config):
    """Encode per-image caption features (mean word2vec over captions).

    NOTE(review): the ``cap_dir`` parameter is immediately overwritten by the
    hard-coded ``attrdir`` below -- confirm this is intentional.

    Args:
        cap_dir: ignored (see note above).
        imlist_new: list of relative image paths '<class_dir>/<image>.jpg'.
        imlabelist: per-image labels (unused here).
        config: options object (unused here).

    Returns:
        Dict mapping 1-based image index (as str) to
        {'att': mean caption-feature vector as list}.
    """
    # config.attrdir has to be 2 directories joined as strings default argument to use
    # In interest of time, we're only doing w2v captions
    # Get caption text dir and feature dir
    attrdir = './data/CUB/text_c10,./data/CUB/w2v_c10'
    cap_dir = attrdir.split(',')[0]
    feat_dir = attrdir.split(',')[1]
    # Load appropriate mapping: caption .txt paths, normalized to look like
    # the .jpg-relative image names so they can be index-matched below.
    all_f = glob.glob(cap_dir + '/*/*.txt')
    all_f = sorted(all_f)
    all_f = [x.replace('./','').replace('.txt', '.jpg').replace(cap_dir + '/', '') for x in all_f]
    # Load all class t7 files (one torch file of caption features per class).
    t7_dict = {}
    class_names = list(set([x.split('/')[0] for x in imlist_new]))
    print('Loading feature files..')
    for i in class_names:
        fname = i
        # Feature files use the apostrophe-preserving class names.
        if fname in list(CUB_FNAME_FIX.keys()):
            fname = CUB_FNAME_FIX[fname]
        t7_dict[i] = torchfile.load(feat_dir + '/' + fname + '.t7')
    im_attr = {}
    # Do this iteratively
    print('Encoding captions...')
    for i in tqdm(range(len(imlist_new))):
        imname = imlist_new[i]
        # Image name to class-t7 file
        class_name = imname.split('/')[0]
        data = t7_dict[class_name]
        # Position of this image within its class (caption features are
        # stored per class in the same sorted order as the caption files).
        imind = all_f.index(imname)
        indlist = sorted([all_f.index(x) for x in all_f if class_name in x])
        pos = indlist.index(imind)
        feat = data[pos].T
        im_attr[str(i+1)] = {}
        # Average the per-caption features into one vector per image.
        im_attr[str(i+1)]['att'] = np.mean(feat, axis=0).tolist()
    return im_attr
def im_imid_map(imgmap):
    """Return a dict mapping image file basename -> image id.

    ``imgmap`` is a text file of '<image_id> <image_path>' lines.
    """
    with open(imgmap) as listing:
        records = (line.rstrip('\n').split(" ") for line in listing)
        return {ntpath.basename(rec[1]): rec[0] for rec in records}
def load_class_splits(config):
    """Split the seen (train) classes into train/val class subsets.

    Reads the class-ID split .mat file and the class list, keeps only the
    classes whose ID is in the train split, shuffles them, and cuts the
    result into ``config.n_train_class`` train classes plus the remainder
    as validation classes.

    Returns:
        (train_classes, val_classes): lists of (0-based class id, name).
    """
    class_split = loadmat(config.split_file)
    train_cid = class_split['train_cid'][0].tolist()
    test_cid = class_split['test_cid'][0].tolist()  # loaded but unused here
    seen_classes = []
    with open(config.class_listf, 'r') as listing:
        for row in listing:
            fields = row.strip('\n').split(' ')
            cid = int(fields[0])
            if cid in train_cid:
                # Store 0-based ids alongside the class directory name.
                seen_classes.append((cid - 1, fields[1]))
    # Randomize which seen classes land in train vs. val.
    random.shuffle(seen_classes)
    cut = int(config.n_train_class)
    return seen_classes[:cut], seen_classes[cut:]
def create_splits(config):
    """Build train/val splits pairing class embeddings with importance alphas.

    Reads the per-image alpha JSON files from ``config.alphadir``, attaches
    the modality-specific class embedding (attributes / wikipedia TF-IDF /
    captions) and flags each instance as train or val depending on whether
    its ground-truth class landed in the train partition of the seen classes.

    Parameters
    ----------
    config : DotMap
        Parsed experiment configuration (paths, modality, model names).

    Returns
    -------
    (list, list)
        ``train_split`` (shuffled) and ``val_split``; each element is a
        ``(image_id, gt_class, attribute_vector, alpha_vector)`` tuple.
    """
    # Create proper train,val and test splits from CUB alphas dataset
    im_imid = im_imid_map(config.imagelist)
    train_seen_class, val_seen_class = load_class_splits(config)
    imlist_new = [x.strip('\n').split(' ')[1] for x in open('./data/CUB/images.txt', 'r').readlines()]
    imlabelist = [int(y.strip('\n').split(' ')[1])-1 for y in open(config.imagelabellist, 'r').readlines()]
    # modality specific data loader
    if config.modality == 'attributes':
        if config.a2t == 'True':
            im_attr, cls_attr = encode_attributes_class(config.classattrdir_binary, imlabelist, config)
        else:
            im_attr, cls_attr = encode_attributes_class(config.classattrdir, imlabelist, config)
    elif config.modality == 'wikipedia':
        im_attr = encode_tfidf(config.attrdir, imlabelist, config)
    elif config.modality == 'captions':
        cls_attr = encode_captions(config.attrdir, imlist_new, imlabelist, config)
    else:
        print("Modality not supported")
    imlist = []
    imlabellist = []
    imattrlist = []
    imalphaslist = []
    train_val_split = []
    trainval_alphadir = config.alphadir.format(config.dataset, config.dataset, config.alpha_model_name, int(config.n_seen_train), config.alpha_layer_name)
    print('alphadir: ', trainval_alphadir)
    for filename in tqdm(os.listdir(trainval_alphadir)):
        if filename.endswith(".json"):
            with open(trainval_alphadir+filename, 'r') as fj:
                data = json.load(fj)
            image = ntpath.basename(data['image'])
            image_id = im_imid[str(image)]
            imlist.append(image_id)
            gt_class = int(data['gt_cid'])
            if config.modality == 'attributes':
                attr = cls_attr[gt_class]
            elif config.modality == 'captions':
                attr = cls_attr[str(gt_class+1)]['att']
            imattrlist.append(attr)
            # Train on all train and val attributes and test on test attributes
            # train = 1 marks images whose class is in the train partition.
            if gt_class in [x for (x, _) in train_seen_class]:
                train = 1
            else:
                train = 0
            imlabellist.append(gt_class)
            gt_class_alpha = data['gt_alpha']
            imalphaslist.append(gt_class_alpha)
            train_val_split.append(train)
    whole_dataset = list(zip(imlist, imlabellist, imattrlist, imalphaslist, train_val_split))
    train_split = [x for x in whole_dataset if x[4] == 1]
    val_split = [x for x in whole_dataset if x[4] == 0]
    shuffle(train_split)
    # split into train and validation set (drop the split flag)
    train_split = [(x[0], x[1], x[2], x[3]) for x in train_split]
    val_split = [(x[0], x[1], x[2], x[3]) for x in val_split]
    # Fixed: the counts were previously passed as a second print argument while
    # the string still contained an unused %d placeholder.
    print('#train_instances: %d' % len(train_split))
    print('#val_instances: %d' % len(val_split))
    return train_split, val_split
def main(args):
    """Train a modality-embedding -> alpha regression network.

    Loads the JSON config, builds train/val datasets of (embedding, alpha)
    pairs, constructs the chosen regressor (linear / 2layer / multilayer),
    optimizes the selected alpha loss with Adam, and checkpoints the model
    whenever the validation loss improves.
    """
    # Load config JSON and use the arguments
    config = parse_json(args.config_json)
    pprint(config)
    config = DotMap(config)
    train_split, val_split = create_splits(config)
    train_im, train_class, train_attr, train_alphas = map(list, zip(*train_split))
    val_im, val_class, val_attr, val_alphas = map(list, zip(*val_split))
    graph = tf.Graph()
    with graph.as_default():
        # Training dataset
        train_attr = tf.constant(np.array(train_attr).astype(np.float32))
        train_alphas = tf.constant(train_alphas)
        train_dataset = tf.contrib.data.Dataset.from_tensor_slices((train_attr, train_alphas))
        train_dataset = train_dataset.shuffle(buffer_size=len(train_split))
        batched_train_dataset = train_dataset.batch(int(config.dom2alpha_batch_size))
        # Val dataset
        val_attr = tf.constant(np.array(val_attr).astype(np.float32))
        val_alphas = tf.constant(val_alphas)
        val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_attr, val_alphas))
        val_dataset = val_dataset.shuffle(buffer_size=len(val_split))
        batched_val_dataset = val_dataset.batch(int(config.dom2alpha_batch_size))
        # Reinitializable iterator that operates on either of the splits
        iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes)
        text, alphas = iterator.get_next()
        train_init_op = iterator.make_initializer(batched_train_dataset)
        val_init_op = iterator.make_initializer(batched_val_dataset)
        # Define the global step to be some tf.Variable
        global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
        step_initializer = tf.variables_initializer([global_step_tensor])
        if config.dom2alpha_model == "linear":
            print("----------------------------------------------------------------Creating a linear model, att to alpha--------------------------------------------------------------------")
            num_input = int(config.n_attr)
            num_output = int(config.n_alphas)
            weights = {'out': tf.Variable(tf.random_normal([num_input, num_output]))}
            biases = {'out': tf.Variable(tf.random_normal([num_output]))}
            def neural_net(x):
                # Single affine layer: x W + b
                out_layer = tf.add(tf.matmul(x, weights['out']), biases['out'])
                return out_layer
            out = neural_net(text)
        elif config.dom2alpha_model == "multilayer":
            print("------------------------------------------------------------Creating a multilayer (3 layer) model, att to alpha-----------------------------------------------------------")
            n_input = int(config.n_attr)
            n_hidden_1 = int(config.n_hidden_1)  # e.g. 400
            n_hidden_2 = int(config.n_hidden_2)  # e.g. 450
            n_output = int(config.n_alphas)
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
            # Create model: two ReLU hidden layers + linear output layer
            def multilayer_perceptron(x):
                layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=n_hidden_1, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_hidden_2, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                out_layer = tf.contrib.layers.fully_connected(layer_2, num_outputs=n_output, activation_fn=None, weights_regularizer=regularizer)
                return out_layer
            # Construct model
            out = multilayer_perceptron(text)
        elif config.dom2alpha_model == "2layer":
            # Fixed: this banner previously said "(3 layer)" for the 2-layer model.
            print("------------------------------------------------------------Creating a multilayer (2 layer) model, att to alpha-----------------------------------------------------------")
            n_input = int(config.n_attr)
            n_hidden = int(config.n_hidden)  # e.g. 400
            n_output = int(config.n_alphas)
            regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
            # Create model: one ReLU hidden layer + linear output layer
            def multilayer_perceptron(x):
                layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=n_hidden, activation_fn=tf.nn.relu, weights_regularizer=regularizer)
                out_layer = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_output, activation_fn=None, weights_regularizer=regularizer)
                return out_layer
            # Construct model
            out = multilayer_perceptron(text)
        # define loss
        # Normalize gt and predicted alphas (required before cosine distance)
        out_normalized = tf.nn.l2_normalize(out, 1)
        alphas_normalized = tf.nn.l2_normalize(alphas, 1)
        if config.alpha_loss_type == "cd":
            alpha_loss = tf.reduce_mean(tf.losses.cosine_distance(alphas_normalized, out_normalized, dim=1, reduction=tf.losses.Reduction.NONE))
        elif config.alpha_loss_type == "l1":
            alpha_loss = tf.reduce_mean(tf.abs(alphas - out))
        elif config.alpha_loss_type == "cdandl1":
            alpha_loss = tf.reduce_mean(tf.abs(alphas - out)) + float(config.cdl1_reg) * tf.reduce_mean(tf.losses.cosine_distance(alphas_normalized, out_normalized, dim=1, reduction=tf.losses.Reduction.NONE))
        # Regularization term penalizing a mismatch in overall alpha magnitude.
        reg_loss = float(config.dom2alpha_lambda_reg) * tf.abs(tf.nn.l2_loss(out) - tf.nn.l2_loss(alphas))
        loss = alpha_loss + reg_loss
        # Training Op
        optimizer = tf.train.AdamOptimizer(learning_rate=float(config.learning_rate))
        train_op = optimizer.minimize(loss)
        # Adam slot variables need their own initializer.
        adam_vars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name]
        adam_initializer = tf.variables_initializer(adam_vars)
        init_op = tf.initialize_all_variables()
        # Define saver
        saver = tf.train.Saver()
        tf.get_default_graph().finalize()
    # Start a session to learn the modality to alpha mapping
    with tf.Session(graph=graph) as sess:
        val_loss_best = 1e20
        sess.run(adam_initializer)
        sess.run(step_initializer)
        sess.run(init_op)
        tf.train.global_step(sess, global_step_tensor)
        # Start by evaluating on val class data (pre-training baseline)
        sess.run(val_init_op)
        val_loss = []
        while True:
            try:
                l = sess.run(loss)
                val_loss.append(l)
            except tf.errors.OutOfRangeError:
                break
        Initial_valLoss = np.array(val_loss).mean()
        print('Initial Val Loss: {} '.format(Initial_valLoss))
        iteration = 1
        for epoch in range(int(config.num_epochs)):
            print('Epoch {}/{}'.format(epoch+1, int(config.num_epochs)))
            sess.run(train_init_op)
            while True:
                try:
                    # Fixed: fetch the current training loss alongside the train
                    # op; previously only train_op ran and a stale validation
                    # loss value was printed as "Training Loss".
                    _, l = sess.run([train_op, loss])
                    iteration = iteration + 1
                    if (iteration-2) % 100 == 0:
                        print('Iteration: {} Training Loss: {} '.format(iteration, l))
                except tf.errors.OutOfRangeError:
                    break
            print("Validating on the val set (images of val classes)")
            # Load val class info
            sess.run(val_init_op)
            val_loss = []
            while True:
                try:
                    l, out_val = sess.run([loss, out])
                    val_loss.append(l)
                except tf.errors.OutOfRangeError:
                    break
            valLoss = np.array(val_loss).mean()
            print("Epoch {}, average_val_loss: {}".format(epoch, valLoss))
            if valLoss < val_loss_best:
                val_loss_best = valLoss
                checkpoint_dir = config.dom2alpha_ckpt_dir + 'mod_{}_2alpha_dset_{}_baseNcls_{}_basemodel_{}_layername_{}_d2a_model_{}_n_train_{}_alphaloss_{}_epoch_{}_loss_{:0.2f}.ckpt'.format(config.modality, config.dataset, config.n_seen_train, config.base_model, config.alpha_layer_name, config.dom2alpha_model, config.n_train_class, config.alpha_loss_type, epoch, valLoss)
                print("Saving model parameters to: ", checkpoint_dir)
                saver.save(sess, checkpoint_dir)
            else:
                print("Val loss went up ")
            # NOTE: extra per-epoch bump kept to preserve the original print cadence.
            iteration += 1
        print("Optimization Finished! ")
        print("Best Checkpoint dir: ", checkpoint_dir)
        print("Initial Validation loss was: {}".format(Initial_valLoss))
        print("Best Validation loss achieved: {}".format(val_loss_best))
# Script entry point: parse command-line arguments and run training.
if __name__ == '__main__':
    args = parser.parse_args()
    main(args)
| 18,735 | 41.103371 | 377 | py |
neuron-importance-zsl | neuron-importance-zsl-master/alpha2w.py | # Finetune a network in tensorflow on the CUB dataset
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
import ntpath
import json
import pdb
import random
import torchfile
import importlib
from scipy.stats import spearmanr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
from tensorflow.python.ops import array_ops
from tensorflow.python import pywrap_tensorflow
from pprint import pprint
import pickle
from dotmap import DotMap
import glob
import itertools
from itertools import groupby
from random import shuffle
from tqdm import tqdm
# Make the TF-Slim models repository importable (hard-coded user path).
sys.path.insert(0, '/nethome/rrs6/models/research/slim')
# Fix RNG seeds for reproducibility across runs.
SEED = 1111
tf.set_random_seed(SEED)
random.seed(SEED)
"""
Some classes in Scott Reed's captions
are named differently than the
original CUB_200_2011 folder
"""
# Mapping from CUB_200_2011 folder names to the caption-dataset folder names.
CUB_FNAME_FIX = {'093.Clark_Nutcracker': '093.Clarks_Nutcracker',
                 '124.Le_Conte_Sparrow': '124.Le_Contes_Sparrow',
                 '180.Wilson_Warbler': '180.Wilsons_Warbler',
                 '125.Lincoln_Sparrow': '125.Lincolns_Sparrow',
                 '023.Brandt_Cormorant': '023.Brandts_Cormorant',
                 '178.Swainson_Warbler': '178.Swainsons_Warbler',
                 '122.Harris_Sparrow': '122.Harriss_Sparrow',
                 '113.Baird_Sparrow': '113.Bairds_Sparrow',
                 '123.Henslow_Sparrow': '123.Henslows_Sparrow',
                 '098.Scott_Oriole': '098.Scotts_Oriole',
                 '061.Heermann_Gull': '061.Heermanns_Gull',
                 '022.Chuck_will_Widow': '022.Chuck_wills_Widow',
                 '193.Bewick_Wren': '193.Bewicks_Wren',
                 '067.Anna_Hummingbird': '067.Annas_Hummingbird',
                 '126.Nelson_Sharp_tailed_Sparrow': '126.Nelsons_Sparrow',
                 '115.Brewer_Sparrow': '115.Brewers_Sparrow',
                 '009.Brewer_Blackbird': '009.Brewers_Blackbird'}
# Command-line interface: a single path to the experiment config JSON.
parser = argparse.ArgumentParser()
parser.add_argument('--config_json', default='./arg_configs/vgg16_config_AWA_full.json')
# Per-channel RGB means used for VGG-style image preprocessing.
VGG_MEAN = [123.68, 116.78, 103.94]
def uhead_plotter(l1, l2, l3, l4, l5, l6, directory, mode):
    """Plot the six accuracy curves on one axis and save as <directory><mode>.png."""
    labels = ('seen_seen', 'unseen_unseen', 'seen_unseen_seen_unseen',
              'seen_seen_unseen', 'unseen_seen_unseen', 'harmonic')
    fig = plt.figure()
    axis = fig.add_subplot(111)
    # Plot every series against its own index range, in the same fixed order.
    for series, label in zip((l1, l2, l3, l4, l5, l6), labels):
        axis.plot(range(len(series)), series, label=label)
    plt.legend()
    plt.title(mode)
    fig.savefig(directory + mode + '.png')
    plt.close(fig)
def uhead_plotter_loss(l1, directory, mode):
    """Plot a single validation-loss curve and save it as <directory><mode>.png."""
    fig = plt.figure()
    axis = fig.add_subplot(111)
    axis.plot(range(len(l1)), l1, label=' val set')
    plt.legend()
    plt.title(mode)
    fig.savefig(directory + mode + '.png')
    plt.close(fig)
def parse_json(json_file):
    """Load a JSON config file and return its parsed contents."""
    with open(json_file, 'r') as fh:
        return json.load(fh)
def norm1(a):
    """Return the L1 norm (sum of absolute values) of *a*."""
    return np.abs(a).sum()
def entropy(ls):
    """Return the Shannon entropy (in bits) of the label sequence *ls*.

    Parameters
    ----------
    ls : list
        Sequence of hashable labels; probabilities are the empirical
        frequencies of each distinct label.

    Returns
    -------
    float
        Entropy in bits, ``-sum(p * log2(p))`` over distinct labels.
    """
    n = len(ls)
    # Iterate distinct labels only: the original dict comprehension over the
    # full list recomputed ls.count(x) for every duplicate element, and a
    # leftover debug print(ls) has been removed.
    probs = np.array([ls.count(x) / n for x in set(ls)])
    return -probs.dot(np.log2(probs))
def encode_attributes_class(config, imlabelist):
    """Load class-level attribute vectors from ``config.classattrdir``.

    Parameters
    ----------
    config : DotMap
        Uses ``supervision``, ``classattrdir`` and ``n_class``.
    imlabelist : list
        Per-image class labels (not read in the visible code path).

    Returns
    -------
    (dict, dict)
        ``im_attr`` (see NOTE below — remains empty here) and ``cls_attr``
        mapping class index -> attribute vector (floats; -1 mapped to 0).
    """
    im_attr = {}
    cls_attr = {}
    # Use class level supervision
    if config.supervision=='class':
        class_att_labels = []
        with open(config.classattrdir) as f:
            for n, line in enumerate(f):
                # Split on spaces and drop empty tokens from repeated spaces.
                l = [x for x in line.rstrip().split(" ") ]
                l = [x for x in l if x]
                #l = l.remove('')
                # Treat the -1 "missing attribute" marker as 0.
                l = [float(x) if float(x)!=-1.00 else 0 for x in l ]
                cls_attr[n]=l
                class_att_labels.append(l)
        class_att_labels = np.array(class_att_labels)
        #class_att_avg = np.mean(class_att_labels, axis = 0)
        # NOTE(review): ``im_attr`` is empty at this point, so ``imids`` is
        # always empty and this loop is effectively dead code — confirm intent.
        for c in range(int(config.n_class)):
            imids = [k for k,v in im_attr.items() if v['cls'] == c]
            for id in imids:
                im_attr[id] = {}
                im_attr[id]['att']= class_att_labels[c]/np.max(class_att_labels)
    return im_attr, cls_attr
def encode_tfidf(tf_file, imlabelist, config):
    """Encode per-image TF-IDF features taken from Wikipedia articles.

    Parameters
    ----------
    tf_file : str
        NOTE(review): this parameter is ignored — the path is hard-coded
        below; confirm whether callers rely on passing a different file.
    imlabelist : list of int
        Per-image class labels.
    config : DotMap
        Unused in the visible code path.

    Returns
    -------
    dict
        Maps 1-based image index (as str) -> {'att': tfidf_vector_as_list}.
    """
    # Function to encode the TF-IDF features from wikipedia articles
    # Make this compatible with Ram's attribute encoding function
    attrdir = './data/CUB/11083D_TFIDF.mat'
    tf_idf = scio.loadmat(attrdir)['PredicateMatrix']
    im_attr = {}
    print('Encoding TF-IDF....')
    for i in tqdm(range(len(imlabelist))):
        #print(tf_idf[imlabelist[i]-1].tolist())
        # NOTE(review): callers appear to pass 0-based labels already, so the
        # extra ``-1`` here may double-shift the row index — verify.
        im_attr[str(i+1)] = {}
        im_attr[str(i+1)]['att'] = tf_idf[imlabelist[i]-1].tolist()
    return im_attr
def encode_captions(cap_dir, imlist_new, imlabelist, config):
    """Encode per-image and per-class caption (word2vec) features.

    Parameters
    ----------
    cap_dir : str
        NOTE(review): overwritten immediately below by a hard-coded pair of
        paths; the passed value is ignored — confirm intent.
    imlist_new : list of str
        Relative image paths of the form "<class_folder>/<image>.jpg".
    imlabelist : list
        Per-image class labels (unused in the visible code path).
    config : DotMap
        Unused in the visible code path.

    Returns
    -------
    (dict, dict)
        ``im_attr`` mapping 1-based image index (str) -> mean caption
        feature (list), and ``cls_attr`` mapping 0-based class id -> mean
        feature over the class's images (numpy array).
    """
    # config.attrdir has to be 2 directories joined as strings default argument to use
    # In interest of time, we're only doing w2v captions
    # Get caption text dir and feature dir
    attrdir = './data/CUB/text_c10,./data/CUB/w2v_c10'
    cap_dir = attrdir.split(',')[0]
    feat_dir = attrdir.split(',')[1]
    # Load appropriate mapping: caption .txt paths, normalized to match the
    # "<class>/<image>.jpg" naming used by imlist_new.
    all_f = glob.glob(cap_dir + '/*/*.txt')
    all_f = sorted(all_f)
    all_f = [x.replace('./','').replace('.txt', '.jpg').replace(cap_dir + '/', '') for x in all_f]
    # Load all class t7 files (one torch file of caption features per class).
    t7_dict = {}
    class_names = list(set([x.split('/')[0] for x in imlist_new]))
    print('Loading caption feature files..')
    for i in class_names:
        fname = i
        # Some caption folders use a slightly different class spelling.
        if fname in list(CUB_FNAME_FIX.keys()):
            fname = CUB_FNAME_FIX[fname]
        t7_dict[i] = torchfile.load(feat_dir + '/' + fname + '.t7')
    im_attr = {}
    cls_attr = {}
    # Do this iteratively
    print('Encoding captions...')
    for i in tqdm(range(len(imlist_new))):
        imname = imlist_new[i]
        # Class folders are named "<id>.<name>", so the prefix gives the id.
        class_id = int(imlist_new[i].split('.')[0])-1
        # Image name to class-t7 file
        class_name = imname.split('/')[0]
        data = t7_dict[class_name]
        # Position of this image within its class (t7 rows are per-image,
        # ordered like the sorted caption file list).
        imind = all_f.index(imname)
        indlist = sorted([all_f.index(x) for x in all_f if class_name in x])
        pos = indlist.index(imind)
        feat = data[pos].T
        #im_attr[str(i+1)] = {}
        # Average over the image's multiple captions.
        im_attr[str(i+1)] = np.mean(feat, axis=0).tolist()
        if class_id in cls_attr:
            cls_attr[class_id].append(np.mean(feat, axis=0))
        else:
            cls_attr[class_id] = []
            cls_attr[class_id].append(np.mean(feat, axis=0))
    # Collapse each class's per-image features into a single mean vector.
    for id in cls_attr:
        cls_attr[id] = np.array(cls_attr[id]).mean(axis=0)
    return im_attr, cls_attr
def im_imid_map(imagelist):
    """Build basename<->ID lookup tables from an image-list file.

    Parameters
    ----------
    imagelist : str
        Path to a text file whose lines are ``"<image_id> <image_path>"``.

    Returns
    -------
    (dict, dict)
        ``(basename -> id, id -> basename)``; all values are strings.
    """
    name_to_id = {}
    id_to_name = {}
    with open(imagelist) as fh:
        for raw in fh:
            fields = raw.rstrip('\n').split(" ")
            base = ntpath.basename(fields[1])
            name_to_id[base] = fields[0]
            id_to_name[fields[0]] = base
    return name_to_id, id_to_name
def get_alphas(config, imattrlist, checkpoint_path):
    """Run the trained modality->alpha regressor on embedding vectors.

    Rebuilds the same network architecture that was trained (selected by
    ``config.dom2alpha_model``), restores its weights from a checkpoint and
    predicts alphas for every input vector in one forward pass.

    Parameters
    ----------
    config : DotMap
        Uses ``dom2alpha_model`` ('linear' / 'multilayer' / '2layer'),
        ``n_alphas``, ``n_attr`` and the hidden-layer size options.
    imattrlist : list
        One embedding vector per class; all must share the same length.
    checkpoint_path : str
        TF checkpoint of the trained regressor to restore.

    Returns
    -------
    numpy.ndarray
        Predicted alphas, one row per input vector.
    """
    # NOTE(review): this Graph object is never entered; all ops below are
    # added to the process-wide default graph — confirm that is intended.
    graph = tf.Graph()
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
    num_input = len(imattrlist[0])
    # load dom2alpha model
    if config.dom2alpha_model =='linear':
        n_alphas = int(config.n_alphas)
        weights = {'out': tf.Variable(tf.random_normal([num_input, n_alphas]))}
        biases = {'out': tf.Variable(tf.random_normal([n_alphas]))}
        adam_vars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name]
        def neural_net(x):
            # Single affine layer: x W + b
            out_layer = tf.add(tf.matmul(x, weights['out']), biases['out'])
            return out_layer
        text = tf.placeholder(tf.float32, [None, int(config.n_attr)])
        out = neural_net(text)
    elif config.dom2alpha_model=='multilayer':
        n_alphas = int(config.n_alphas)
        # Two ReLU hidden layers followed by a linear output layer.
        def multilayer_perceptron(x):
            layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=int(config.n_hidden_1), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=int(config.n_hidden_2), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            out_layer = tf.contrib.layers.fully_connected(layer_2, num_outputs=n_alphas, activation_fn=None, weights_regularizer=regularizer)
            return out_layer
        # Construct model
        text = tf.placeholder(tf.float32, [None, int(config.n_attr)])
        out = multilayer_perceptron(text)
    elif config.dom2alpha_model=='2layer':
        n_alphas = int(config.n_alphas)
        # One ReLU hidden layer followed by a linear output layer.
        def multilayer_perceptron(x):
            layer_1 = tf.contrib.layers.fully_connected(x, num_outputs=int(config.n_hidden), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            #layer_2 = tf.contrib.layers.fully_connected(layer_1, num_outputs=int(config.n_hidden_2), activation_fn=tf.nn.relu, weights_regularizer=regularizer)
            out_layer = tf.contrib.layers.fully_connected(layer_1, num_outputs=n_alphas, activation_fn=None, weights_regularizer=regularizer)
            return out_layer
        # Construct model
        text = tf.placeholder(tf.float32, [None, int(config.n_attr)])
        out = multilayer_perceptron(text)
    saver = tf.train.Saver()
    # NOTE(review): the session is never closed and ``init_op`` is never run;
    # all variable values come from the restored checkpoint.
    sess = tf.Session()
    init_op = tf.initialize_all_variables()
    saver.restore(sess, checkpoint_path)
    alpha_val = sess.run(out, feed_dict={text:imattrlist})
    return alpha_val
def load_class_splits(split_file, class_listf):
    """Load the seen/unseen class split and build index-remapping tables.

    Parameters
    ----------
    split_file : str
        .mat file holding 1-based ``train_cid`` (seen) and ``test_cid``
        (unseen) class-ID arrays.
    class_listf : str
        Text file of ``"<id> <name>"`` lines listing every class.

    Returns
    -------
    tuple
        ``(test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv,
        idmaps_all_inv, idmaps_seen_inv)`` where ``test_unseen_class`` is a
        list of ``(class_id - 1, class_name)`` tuples for the unseen classes
        and the ``idmaps*`` dicts remap original 0-based class IDs to compact
        contiguous indices (and back, for the ``*_inv`` variants).
    """
    # Create a mapping for the reduced classes
    # Load class splits from split_file (IDs are 1-based in the .mat file).
    class_split = scio.loadmat(split_file)
    # Get train class-IDs
    train_cid = class_split['train_cid'][0].tolist()
    test_cid = class_split['test_cid'][0].tolist()
    # Load all classes and ignore classes that are not in the unseen set.
    test_unseen_class = []
    for line in open(class_listf, 'r').readlines():
        classID = int(line.strip('\n').split(' ')[0])
        class_name = line.strip('\n').split(' ')[1]
        if classID in test_cid:
            test_unseen_class.append((classID-1, class_name))
    # Create mapping: unseen class IDs (0-based, sorted) -> 0..n_unseen-1.
    ids = sorted([x[0] for x in test_unseen_class])
    idmaps = {}
    idmaps_inv = {}
    for i in range(len(ids)):
        idmaps[ids[i]] = i
        idmaps_inv[i] = ids[i]
    # Seen class IDs (in train_cid order) -> 0..n_seen-1.
    idmaps_seen = {}
    idmaps_seen_inv = {}
    for i in range(len(train_cid)):
        idmaps_seen[train_cid[i] - 1] = i
        idmaps_seen_inv[i] = train_cid[i] - 1
    # Combined mapping: seen classes occupy 0..n_seen-1, then unseen classes.
    idmaps_all = {}
    idmaps_all_inv = {}
    for i in range(len(train_cid)+len(test_cid)):
        if i < len(train_cid):
            idmaps_all[train_cid[i] - 1] = i
            idmaps_all_inv[i] = train_cid[i] - 1
            #print(train_cid[i] -1)
        else:
            idmaps_all[ids[i-len(train_cid)]] = i
            idmaps_all_inv[i] = ids[i-len(train_cid)]
    return test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv
def load_data(config, fname,test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv, imagedir, imagelist, imagelabellist, train_test_split_list):
    """Assemble train/val/test splits pairing images with predicted alphas.

    Class embeddings for the configured modality are mapped to alphas with
    the pretrained regressor (``get_alphas``), then the precomputed seen and
    unseen test-split JSON files are expanded into
    ``(image_path, class_id, alpha)``-style tuples.

    Returns
    -------
    tuple of six lists
        ``(train_split, val_split, test_split, test_split_seen,
        test_split_unseen, train_split_seen)``. train/val elements are
        ``(unseen_image, random_seen_image, class, class_attr, alpha)``;
        the test-style splits hold ``(image, class, alpha)`` tuples.
        NOTE(review): train_split and val_split contain the same instances
        (train_split is just shuffled) — confirm this is intended.
    """
    # Check if files exist or not
    im_imid, imid_im = im_imid_map(config.imagelist)
    # Create proper train, val and test splits from the CUB dataset
    imlist = [x.strip('\n').split(' ')[1] for x in open(imagelist, 'r').readlines()]
    imidlist = [x.strip('\n').split(' ')[0] for x in open(imagelist, 'r').readlines()]
    imlabelist = [int(y.strip('\n').split(' ')[1])-1 for y in open(imagelabellist, 'r').readlines()]
    #############################################################################################################################
    # Load New imagelist: modality-specific class embeddings.
    if config.modality == 'attributes':
        im_attr, cls_attr = encode_attributes_class(config,imlabelist)
    elif config.modality == 'wikipedia':
        im_attr = encode_tfidf(config.attrdir, imlabelist, config)
    elif config.modality == 'captions':
        imlist_new = [x.strip('\n').split(' ')[1] for x in open(config.imagelist, 'r').readlines()]
        im_attr, cls_attr = encode_captions(config.attrdir, imlist_new, imlabelist, config)
    else:
        print("Modality not supported")
    # print(im_attr.keys())
    #############################################################################################################################
    imattrlist = [cls_attr[x] for x in imlabelist]
    # Predict one alpha vector per class with the pretrained regressor.
    clsattralphas = {}
    clsattralphas_list = get_alphas(config, list(cls_attr.values()), config.text2alpha_ckpt).tolist()
    for h in cls_attr:
        clsattralphas[h] = clsattralphas_list[h]
    # Remove all instances of unseen classes from the dataset
    req_im, req_im_seen, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], [], []
    # Pool of seen-class images used for random pairing with unseen images.
    with open (config.sampling_images, 'rb') as fp:
        files = pickle.load(fp)
    shuffle(files)
    seen_im_list = files[:3000]
    unseen_test_split_file = config.unseen_test_split_file
    seen_test_split_file = config.seen_test_split_file
    # Train/val pool: unseen-class instances, each paired with a random seen image.
    with open(unseen_test_split_file,'r') as fj:
        data = json.load(fj)
        for d in data:
            req_im.append(d[0])
            req_imclass.append(int(d[1]))
            req_imattr.append(cls_attr[int(d[1])])
            req_attralpha.append(clsattralphas[idmaps_inv[int(d[1])]])
            req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
    req_dataset = list(zip(req_im, req_im_seen, req_imclass, req_imattr, req_attralpha))
    shuffle(req_dataset)
    train_split = req_dataset
    print("size of train dataset", len(train_split))
    req_dataset = list(zip(req_im,req_im_seen, req_imclass, req_imattr, req_attralpha))
    val_split = req_dataset
    train_split = [(x[0], x[1], x[2], x[3], x[4]) for x in train_split]
    val_split = [(x[0], x[1], x[2], x[3], x[4]) for x in val_split]
    print("size of val dataset", len(val_split))
    # Create test split seen classes --> for debugging
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(seen_test_split_file,'r') as fj:
        data = json.load(fj)
        for d in data:
            req_im.append(d[0])
            req_imclass.append(int(d[1]))
            req_imattr.append(cls_attr[int(d[1])])
            req_attralpha.append(clsattralphas[idmaps_seen_inv[int(d[1])]])
            #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
            req_split.append(0)
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    train_split_seen = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    # Create test split for all classes --> generalized zsl
    # (seen classes keep their IDs; unseen-class IDs are offset by n_seen).
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(seen_test_split_file,'r') as fj:
        data = json.load(fj)
        for d in data:
            req_im.append(d[0])
            req_imclass.append(int(d[1]))
            req_imattr.append(cls_attr[int(d[1])])
            req_attralpha.append(clsattralphas[idmaps_seen_inv[int(d[1])]])
            #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
            req_split.append(0)
    with open(unseen_test_split_file,'r') as fj:
        data = json.load(fj)
        for d in data:
            req_im.append(d[0])
            req_imclass.append(int(d[1])+int(config.n_seen))
            req_imattr.append(cls_attr[int(d[1])])
            req_attralpha.append(clsattralphas[idmaps_inv[int(d[1])]])
            #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
            req_split.append(0)
    print("test dataset: number of images from both seen and unseen classes: ", len(req_im))
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    test_split = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    # Seen-classes-only test split.
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(seen_test_split_file,'r') as fj:
        data = json.load(fj)
        for d in data:
            req_im.append(d[0])
            req_imclass.append(int(d[1]))
            req_attralpha.append(clsattralphas[idmaps_seen_inv[int(d[1])]])
            #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
            req_split.append(0)
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    test_split_seen = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    # Unseen-classes-only test split (IDs offset by n_seen, as above).
    req_im, req_imclass, req_imattr, req_attralpha, req_split = [], [], [], [], []
    with open(unseen_test_split_file,'r') as fj:
        data = json.load(fj)
        for d in data:
            req_im.append(d[0])
            req_imclass.append(int(d[1])+int(config.n_seen))
            req_attralpha.append(clsattralphas[idmaps_inv[int(d[1])]])
            #req_im_seen.append(seen_im_list[random.randint(0,len(seen_im_list)-1)])
            req_split.append(0)
    whole_dataset = list(zip(req_im, req_imclass, req_attralpha, req_split))
    test_split_unseen = [(x[0], x[1], x[2]) for x in whole_dataset if x[3]==0]
    print("length of whole test dataset: ", len(test_split))
    print("length of whole test_seen dataset: ", len(test_split_seen))
    print("length of whole test_unseen dataset: ", len(test_split_unseen))
    return train_split, val_split, test_split, test_split_seen, test_split_unseen, train_split_seen
def check_accuracy(sess, prediction, imclass, accuracy, is_training, dataset_init_op, verbose=False):
    """Average the per-batch accuracies over one full pass of a dataset.

    Parameters
    ----------
    sess : tf.Session
        Open session holding the evaluation graph.
    prediction, imclass, accuracy : tensors
        Predicted classes, ground-truth classes and batch accuracy ops.
    is_training : placeholder
        Fed ``False`` so eval-mode behavior is used.
    dataset_init_op : op
        Initializer that rewinds the dataset iterator.
    verbose : bool, optional
        When True, print predictions and ground truth for every batch.

    Returns
    -------
    float
        Mean of the per-batch accuracy values.
    """
    # Rewind the iterator so we evaluate the whole split from the start.
    sess.run(dataset_init_op)
    batch_accs = []
    while True:
        try:
            batch_acc, pred_vals, cls_vals = sess.run([accuracy, prediction, imclass], {is_training: False})
        except tf.errors.OutOfRangeError:
            break
        if verbose:
            print("pred: ", pred_vals)
            print("gt cls:", cls_vals)
        batch_accs.append(batch_acc)
    return np.mean(np.array(batch_accs))
def check_accuracy_normalized(sess, prediction, imclass, accuracy, is_training, dataset_init_op, verbose=False):
    """Compute class-normalized (macro-averaged) accuracy over a dataset.

    Unlike ``check_accuracy``, this collects every prediction and label,
    computes per-class accuracy and returns the unweighted mean over
    classes — the standard ZSL evaluation metric.

    Parameters
    ----------
    sess : tf.Session
        Open session holding the evaluation graph.
    prediction, imclass, accuracy : tensors
        Predicted classes, ground-truth classes and batch accuracy ops.
    is_training : placeholder
        Fed ``False`` so eval-mode behavior is used.
    dataset_init_op : op
        Initializer that rewinds the dataset iterator.
    verbose : bool, optional
        When True, print predictions and ground truth for every batch.

    Returns
    -------
    float
        Mean of the per-class accuracies.
    """
    # Check accuracy on train or val
    # Initialize the dataset
    sess.run(dataset_init_op)
    acc_list = []
    whole_pred_list = []
    whole_label_list = []
    while True:
        try:
            acc, prediction_val, imclass_val= sess.run([accuracy, prediction, imclass], {is_training: False})
            if verbose:
                print("pred: ", prediction_val)
                print("gt cls:", imclass_val)
            acc_list.append(acc)
            # Get of unique predictions: promote scalars so .tolist() works.
            if isinstance(prediction_val,int):
                prediction_val = np.array([prediction_val])
            if isinstance(imclass_val,int):
                imclass_val = np.array([imclass_val])
            try:
                whole_pred_list += prediction_val.tolist()
                whole_label_list += imclass_val.tolist()
            except TypeError:
                # NOTE(review): a TypeError here silently stops the whole
                # evaluation loop early — confirm this is intended.
                break
        except tf.errors.OutOfRangeError:
            break
    # NOTE(review): ``final_acc`` (micro average) is computed but not
    # returned; the macro (per-class) average below is what callers get.
    final_acc = np.mean(np.array(acc_list))
    # Get unique classes
    unique_cls = list(set(whole_label_list))
    # Find incices corresponding to a class and score each class separately.
    all_cls_acc = []
    for y in unique_cls:
        gt_indices = [i for i,x in enumerate(whole_label_list) if x == y]
        acc_clas = float([whole_pred_list[i] for i in gt_indices].count(y))/len(gt_indices)
        all_cls_acc.append(acc_clas)
    # print("pred list:", whole_pred_list)
    # print("label list:", whole_label_list)
    return np.mean(all_cls_acc)
    # return final_acc
def main(args):
# Load config JSON and use the arguments
config = parse_json(args.config_json)
pprint(config)
config = DotMap(config)
print("loading class splits ..")
test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv = load_class_splits(config.split_file, config.class_listf)
#test_unseen_class, idmaps, idmaps_all, idmaps_seen = load_class_splits(config.split_file, config.class_listf)
# Load the dataset splits
print('Loading data...')
# train_split, val_split, test_split, test_split_seen, test_split_unseen, train_split_seen = load_data(config, config.save_path.split('/')[0], test_unseen_class, idmaps, idmaps_all, idmaps_seen, config.imagedir, config.imagelist, config.imagelabellist, config.train_test_split_list, float(config.train_prop), float(config.val_prop), float(config.test_prop))
train_split, val_split, test_split, test_split_seen, test_split_unseen, train_split_seen = load_data(config, config.save_path.split('/')[0], test_unseen_class, idmaps, idmaps_all, idmaps_seen, idmaps_inv, idmaps_all_inv, idmaps_seen_inv, config.imagedir, config.imagelist, config.imagelabellist, config.train_test_split_list)
train_files,seen_train_files, train_imclass, train_imattr, train_attralpha= map(list, zip(*train_split))
val_files,seen_val_files, val_imclass, val_imattr, val_attralpha = map(list, zip(*val_split))
test_files, test_labels, test_attralpha = map(list, zip(*test_split))
test_files_seen, test_labels_seen, test_attralpha_seen = map(list, zip(*test_split_seen))
train_files_seen, train_labels_seen, train_attralpha_seen = map(list, zip(*train_split_seen))
test_files_unseen, test_labels_unseen, test_attralpha_unseen = map(list, zip(*test_split_unseen))
train_imclass = np.array(train_imclass).astype('int32')
val_imclass = np.array(val_imclass).astype('int32')
test_labels = np.array(test_labels).astype('int32')
test_labels_seen = np.array(test_labels_seen).astype('int32')
test_labels_unseen = np.array(test_labels_unseen).astype('int32')
# Write graph definition based on model name
# Define the computation graph with necessary functions
graph = tf.Graph()
with graph.as_default():
# Preprocessing function and module import
preprocess_module_name = 'preprocessing.' + config.preprocess_fn
preprocess_module = importlib.import_module(preprocess_module_name)
# Get image size
mc = getattr(nets, config.model_class, None)
m = getattr(mc, config.model_name, None)
#im_size = getattr(m, 'default_image_size', None)
im_size = int(config.image_size)
# Parsing an pre-processing function
def _parse_function_seen(seen_filename, imclass, attralpha):
image_f = tf.read_file(seen_filename)
image_dec = tf.image.decode_jpeg(image_f, channels=3)
image = tf.cast(image_dec, tf.float32)
# Resize image
res_img = tf.image.resize_images(image, [im_size, im_size])
# attralpha_noise = tf.random_normal([int(config.n_alphas)])
return res_img, imclass, attralpha
# Substitute for parse_function seen+ unseen uncluding preprocessing
def parse_fn_train(filename, imclass, attralpha):
image_file = tf.read_file(filename)
image = tf.image.decode_jpeg(image_file, channels=3)
processed_image = preprocess_module.preprocess_image(image, im_size, im_size, is_training=False)
return processed_image, imclass, attralpha
def _parse_function_noise(seen_filename, imclass, attralpha):
image_f = tf.read_file(seen_filename)
image_dec = tf.image.decode_jpeg(image_f, channels=3)
image = tf.cast(image_dec, tf.float32)
# Resize image
res_img = tf.image.resize_images(image, [im_size, im_size])
img = tf.random_normal([im_size, im_size, 3], mean=0, stddev=1.0)
img = tf.div(tf.subtract(img, tf.reduce_min(img)), tf.subtract(tf.reduce_max(img), tf.reduce_min(img)))
img = tf.cast(img*255.0, tf.int32)
res_img_noise = tf.cast(img, tf.float32)
res_img = res_img_noise
return res_img, imclass, attralpha
# Parsing an pre-processing function
def _parse_function_val(filename, imclass, attralpha):
image_f = tf.read_file(filename)
image_dec = tf.image.decode_jpeg(image_f, channels=3)
image = tf.cast(image_dec, tf.float32)
# Resize image
res_img = tf.image.resize_images(image, [im_size, im_size])
return res_img, imclass, attralpha
# preprocessing function
def prepro(image, imclass, attralpha):
    """Subtract the per-channel VGG mean pixel from *image*."""
    mean_pixel = tf.reshape(tf.constant(VGG_MEAN), [1, 1, 3])
    return image - mean_pixel, imclass, attralpha
# Dataset creation
# Training dataset
print("Creating datasets..")
train_files = tf.constant(train_files)
seen_train_files = tf.constant(seen_train_files)
train_imclass = tf.constant(train_imclass)
train_attralpha = tf.constant(train_attralpha)
train_dataset = tf.contrib.data.Dataset.from_tensor_slices((seen_train_files, train_imclass, train_attralpha))
if config.prepro == 'unified':
train_dataset = train_dataset.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size = int(config.batch_size))
else:
if config.sampling_mode =='noise':
train_dataset = train_dataset.map(_parse_function_noise, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
train_dataset = train_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
else:
train_dataset = train_dataset.map(_parse_function_seen, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
train_dataset = train_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
train_dataset = train_dataset.shuffle(buffer_size=len(train_split))
batched_train_dataset = train_dataset.batch(int(config.batch_size))
# Validation dataset
val_files = tf.constant(val_files)
val_imclass = tf.constant(val_imclass)
val_attralpha = tf.constant(val_attralpha)
val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_files, val_imclass, val_attralpha))
if config.prepro == 'unified':
val_dataset = val_dataset.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
else:
val_dataset = val_dataset.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
val_dataset = val_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
batched_val_dataset = val_dataset.batch(int(config.batch_size))
# Test dataset
test_files = tf.constant(test_files)
test_labels = tf.constant(test_labels)
test_attralpha = tf.constant(test_attralpha)
test_dataset = tf.contrib.data.Dataset.from_tensor_slices((test_files, test_labels, test_attralpha))
if config.prepro == 'unified':
test_dataset = test_dataset.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
else:
test_dataset = test_dataset.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
test_dataset = test_dataset.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
batched_test_dataset = test_dataset.batch(int(config.batch_size))
test_files_seen = tf.constant(test_files_seen)
test_labels_seen = tf.constant(test_labels_seen)
test_attralpha_seen = tf.constant(test_attralpha_seen)
test_dataset_seen = tf.contrib.data.Dataset.from_tensor_slices((test_files_seen, test_labels_seen, test_attralpha_seen))
if config.prepro =='unified':
test_dataset_seen = test_dataset_seen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
else:
test_dataset_seen = test_dataset_seen.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
test_dataset_seen = test_dataset_seen.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
batched_test_dataset_seen = test_dataset_seen.batch(int(config.batch_size))
# Test dataset unseen
test_files_unseen = tf.constant(test_files_unseen)
test_labels_unseen = tf.constant(test_labels_unseen)
test_attralpha_unseen = tf.constant(test_attralpha_unseen)
test_dataset_unseen = tf.contrib.data.Dataset.from_tensor_slices((test_files_unseen, test_labels_unseen, test_attralpha_unseen))
# test_dataset_unseen = test_dataset_unseen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
if config.prepro =='unified':
test_dataset_unseen = test_dataset_unseen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
else:
test_dataset_unseen = test_dataset_unseen.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
test_dataset_unseen = test_dataset_unseen.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
batched_test_dataset_unseen = test_dataset_unseen.batch(int(config.batch_size))
# Train dataset seen
train_files_seen = tf.constant(train_files_seen)
train_labels_seen = tf.constant(train_labels_seen)
train_attralpha_seen = tf.constant(train_attralpha_seen)
train_dataset_seen = tf.contrib.data.Dataset.from_tensor_slices((train_files_seen, train_labels_seen, train_attralpha_seen))
#train_dataset_seen = train_dataset_seen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
if config.prepro =='unified':
train_dataset_seen = train_dataset_seen.map(parse_fn_train, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
else:
train_dataset_seen = train_dataset_seen.map(_parse_function_val, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
train_dataset_seen = train_dataset_seen.map(prepro, num_threads=int(config.batch_size), output_buffer_size=int(config.batch_size))
batched_train_dataset_seen = train_dataset_seen.batch(int(config.batch_size))
# Define iterator that operates on either of the splits
iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes)
images, imclass, attralpha = iterator.get_next()
train_init_op = iterator.make_initializer(batched_train_dataset)
val_init_op = iterator.make_initializer(batched_val_dataset)
test_init_op = iterator.make_initializer(batched_test_dataset)
test_seen_init_op = iterator.make_initializer(batched_test_dataset_seen)
train_seen_init_op = iterator.make_initializer(batched_train_dataset_seen)
test_unseen_init_op = iterator.make_initializer(batched_test_dataset_unseen)
# Boolean variable for train-vs-test
is_training = tf.placeholder(tf.bool)
# Define the global step to be some tf.Variable
global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
model_c = getattr(nets, config.model_class, None)
model = getattr(model_c, config.model_name, None)
arg_scope = getattr(model_c, config.scope, None)
# Get number of classes in train and test
n_seen = int(config.n_seen)
n_unseen = int(config.n_unseen)
print("Defining model from .. ", model_c )
with slim.arg_scope(arg_scope(weight_decay=float(0))):
print("--------------------------Using original network---------------------------------------------------------------")
# with slim.arg_scope(arg_scope()):
if config.base_model == 'resnet':
logits, endpoints = model(images, num_classes=n_seen, is_training=is_training)
else:
logits, endpoints = model(images, num_classes=n_seen, is_training=is_training, dropout_keep_prob=bool(config.dropout))
if config.base_model == 'resnet':
fc8_seen_weights = tf.contrib.framework.get_variables(config.penultimate_seen_weights)
fc8_seen_biases = tf.contrib.framework.get_variables('resnet_v1_101/logits/biases:0')
else:
fc8_seen_weights = tf.contrib.framework.get_variables('vgg_16/fc8/weights:0')
fc8_seen_biases = tf.contrib.framework.get_variables('vgg_16/fc8/biases:0')
# Check for model path
# assert(os.path.isfile(config.ckpt_path))
if config.base_model =='resnet':
if config.ckpt == 'old':
orig_ckpt = config.orig_ckpt_path
orig_ckpt_reader = pywrap_tensorflow.NewCheckpointReader(orig_ckpt)
new_ckpt_reader = pywrap_tensorflow.NewCheckpointReader(config.ckpt_path)
new_var_to_shape_map = new_ckpt_reader.get_variable_to_shape_map()
orig_var_to_shape_map = orig_ckpt_reader.get_variable_to_shape_map()
vars_in_orig_ckpt = [key for key in sorted(orig_var_to_shape_map)]
vars_in_new_ckpt = [key for key in sorted(new_var_to_shape_map)]
vars_in_graph = [x.name.split(':')[0] for x in tf.contrib.framework.get_variables()]
# Variables to borrow from old ckpt
vars_to_borrow = list(set(list(set(vars_in_graph) - set(vars_in_new_ckpt))) & set(vars_in_orig_ckpt))
# Variables to initialize
# vars_to_init = list(set(vars_in_graph) - set(vars_to_borrow))
vars_to_init = list(set(vars_in_graph) - set(vars_to_borrow + vars_in_new_ckpt))
# Old ckpt init function
old_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(orig_ckpt, [x for x in tf.contrib.framework.get_variables() if (x.name.split(':')[0] in vars_to_borrow) and ('global_step' not in x.name)])
# New ckpt init function
new_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, [x for x in tf.contrib.framework.get_variables() if x.name.split(':')[0] in vars_in_new_ckpt])
else:
new_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, tf.contrib.framework.get_variables_to_restore(exclude=['global_step']))
var_init = tf.variables_initializer([global_step_tensor])
# get seen weights and initialize new layer with mean of them
sess1 = tf.Session()
new_ckpt_init_fn(sess1)
fc8_seen_weights_value = sess1.run(fc8_seen_weights)[0]
fc8_seen_biases_value = sess1.run(fc8_seen_biases)[0]
fc8_seen_weights_mean = fc8_seen_weights_value.mean(axis=3)
fc8_seen_biases_mean = fc8_seen_biases_value.mean(axis=0)
fc8_seen_weights_init = np.repeat(fc8_seen_weights_mean, n_unseen, axis=2)
fc8_seen_biases_init = np.repeat(fc8_seen_biases_mean, n_unseen)
logits = tf.squeeze(logits)
# Add a new head
if config.unseen_w_init == 'seen_centered':
# Initialize by a gaussian centered on the seen class weights
mean_seen_wt = tf.reduce_mean(tf.squeeze(fc8_seen_weights), axis=1, keep_dims=True)
std_seen_wt = tf.sqrt(tf.reduce_mean(tf.square(tf.squeeze(fc8_seen_weights) - mean_seen_wt), axis=1))
mean_wt = tf.tile(mean_seen_wt, [1, n_unseen])
std_wt = tf.tile(tf.expand_dims(std_seen_wt, axis=1), [1, n_unseen])
w_init = tf.random_normal_initializer(mean_wt, std_wt)
logits_unseen = slim.conv2d(endpoints['global_pool'], n_unseen, [1,1], activation_fn = None, normalizer_fn = None, scope='logits_unseen', weights_initializer = w_init)
else:
logits_unseen = slim.conv2d(endpoints['global_pool'], n_unseen, [1,1], activation_fn = None, normalizer_fn = None, scope='logits_unseen', weights_initializer = tf.constant_initializer(fc8_seen_weights_init))
logits_seen = slim.conv2d(endpoints['global_pool'], n_seen, [1,1], activation_fn = None, normalizer_fn = None, scope='logits_seen', weights_initializer = tf.constant_initializer(fc8_seen_weights_value))
logits = array_ops.squeeze(logits_seen, [1,2])
logits_unseen = array_ops.squeeze(logits_unseen, [1,2])
else:
var_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=['global_step'])
print("Using base model checkpoint from: ", config.ckpt_path)
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, var_to_restore)
if config.unseen_w_init == 'seen_centered':
print("Seen centered initializaton of unseen weights")
# Initialize by a gaussian centered on the seen class weights
mean_seen_wt = tf.reduce_mean(tf.squeeze(fc8_seen_weights), axis=1, keep_dims=True)
std_seen_wt = tf.sqrt(tf.reduce_mean(tf.square(tf.squeeze(fc8_seen_weights) - mean_seen_wt), axis=1))
mean_wt = tf.tile(mean_seen_wt, [1, n_unseen])
std_wt = tf.tile(tf.expand_dims(std_seen_wt, axis=1), [1, n_unseen])
w_init = tf.random_normal_initializer(mean_wt, std_wt)
logits_unseen = array_ops.squeeze(tf.contrib.layers.fully_connected(inputs=endpoints['vgg_16/fc7'], num_outputs=n_unseen, activation_fn=None, weights_initializer = w_init), [1,2], name = 'fc8_unseen')
else:
logits_unseen = array_ops.squeeze(tf.contrib.layers.fully_connected(inputs=endpoints['vgg_16/fc7'], num_outputs=n_unseen, activation_fn=None), [1,2], name = 'fc8_unseen')
# Evaluation Metrics for seen classes
prediction_seen = tf.to_int32(tf.argmax(logits, -1))
prediction_seen = tf.squeeze(prediction_seen )
imclass = tf.squeeze(imclass)
correct_prediction_seen = tf.equal(prediction_seen , imclass)
accuracy_seen = tf.reduce_mean(tf.cast(correct_prediction_seen , tf.float32))
logits_seen_unseen = tf.concat([logits, logits_unseen],1)
sys.stdout.flush()
# Evaluation Metrics
prediction = tf.to_int32(tf.argmax(logits_unseen, -1))
prediction = tf.squeeze(prediction)
imclass = tf.squeeze(imclass)
correct_prediction = tf.equal(prediction, imclass)
accuracy_unseen = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
# Generalized ZSL performance
prediction_seen_unseen = tf.to_int32(tf.argmax(logits_seen_unseen,-1))
correct_prediction_seen_unseen = tf.equal(prediction_seen_unseen, imclass)
accuracy_seen_unseen = tf.reduce_mean(tf.cast(correct_prediction_seen_unseen, tf.float32))
# ----------------------------------Optimization starts here---------------------------------------
# Define one-hot for seen class
one_hot_seen = tf.one_hot(imclass, n_seen, 1.0)
signal_seen = tf.multiply(logits, one_hot_seen)
# Define how to get alphas
layer_name = config.model_name + '/' + config.layer_name
grads_seen = tf.gradients(signal_seen, endpoints[layer_name])
# Get alphas
alphas_seen = tf.reduce_sum(grads_seen[0], [1,2])
# Define one-hot for unseen class
one_hot_unseen = tf.one_hot(imclass, n_unseen, 1.0)
signal_unseen = tf.multiply(logits_unseen, one_hot_unseen)
# Define how to get alphas
layer_name = config.model_name + '/' + config.layer_name
grads_unseen = tf.gradients(signal_unseen, endpoints[layer_name])[0]
# Get alphas
alphas_unseen = tf.reduce_sum(grads_unseen, [1,2])
# Regularization coefficient
lambda_loss = float(config.reg_lambda)
attr_alpha_normalized = tf.nn.l2_normalize(attralpha, 1)
alphas_unseen_normalized = tf.nn.l2_normalize(alphas_unseen, 1)
# Cosine distance loss, assumes that both inputs are normalized
def binary_activation(x):
cond = tf.less(x, tf.zeros(tf.shape(x)))
out = tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))
return out
# Loss between network alphas and the predicted importances from domain expert --> alpha model
if config.alpha_loss_type =="cd":
zsl_alpha_loss = tf.reduce_mean(tf.losses.cosine_distance(attr_alpha_normalized, alphas_unseen_normalized, dim=1, reduction=tf.losses.Reduction.NONE))
# Define the optimizers
if config.optimizer =='adam':
optimizer = tf.train.AdamOptimizer(float(config.learning_rate))
if config.optimizer =='sgd':
optimizer = tf.train.GradientDescentOptimizer(float(config.learning_rate))
if config.optimizer =='sgd_momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate = float(config.learning_rate) , momentum = 0.9)
# get the newly initialized vars (for the unseen head)
if config.base_model =='resnet':
new_var = [v for v in tf.trainable_variables() if v.name == 'logits_unseen/weights:0' or v.name =='logits_unseen/biases:0' or v.name == 'logits_seen/weights:0' or v.name =='logits_seen/biases:0']
else:
new_var = [v for v in tf.trainable_variables() if v.name == 'fully_connected/weights:0' or v.name =='fully_connected/biases:0']#tf.contrib.framework.get_variables('logits_unseen')
# Regularizer term
if config.reg_loss == 'dimn_wise_l2':
zsl_reg_loss = tf.nn.l2_loss(tf.squeeze(new_var[0]) - tf.expand_dims(tf.reduce_mean(tf.squeeze(fc8_seen_weights), axis=1), axis=1))
# Total loss is sum of loss and lambda times reg term
zsl_loss = zsl_alpha_loss + lambda_loss * zsl_reg_loss
weights_unseen_grad = tf.gradients(alphas_unseen, new_var[0])[0]
# define training op with the parameters to be optimized (unseen head params and global step)
new_train_op = tf.contrib.slim.learning.create_train_op(zsl_loss, optimizer, variables_to_train=new_var)
new_vars_with_adam = new_var + [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name or 'Adam' in x.name or 'global_step' in x.name]
new_vars_with_adam_momentum = new_var + [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'Momentum' in x.name ]
# Define the global step to be some tf.Variable
global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
new_init = tf.variables_initializer(new_vars_with_adam + new_vars_with_adam_momentum + [global_step_tensor])
print("Graph finalized")
# save accuracies as trainig proceeds
seen_seen_head_ls, unseen_unseen_head_ls, seen_seen_unseen_head_ls, seen_unseen_seen_unseen_head_ls, unseen_seen_unseen_head_ls, hm_ls = [],[],[],[],[], []
vLoss = []
print("Running simple forward pass for getting initial performance ..")
with tf.Session(graph=graph) as sess:
if config.base_model=='resnet':
if config.ckpt=='old':
new_ckpt_init_fn(sess)
old_ckpt_init_fn(sess)
else:
new_ckpt_init_fn(sess)
sess.run(var_init)
else:
init_fn(sess)
sess.run(new_init)
val_loss_best = 1e20
iteration = 1
# Calculate seen head accuracy on the test set
seen_seen_head = check_accuracy_normalized(sess, prediction_seen,imclass, accuracy_seen, is_training, train_seen_init_op, verbose=False)
print("seen head test accuracy (argmax {}): {}".format(config.n_seen, seen_seen_head))
test_accuracy = check_accuracy_normalized(sess, prediction_seen_unseen,imclass, accuracy_seen_unseen, is_training, test_init_op, verbose=False)
print("Normalized Initial seen + unseen head full test accuracy: {}".format(test_accuracy))
# Define criterion for early stopping (loss doesn't improve by 1% in 40 iterations)
zsl_loss_monitor = 1e20
zsl_loss_ctr = 1
zsl_loss_window = int(config.zsl_loss_estop_window)
# Start training
print("Starting Optimization .....")
epoch_flag = False
for epoch in range(1, int(config.num_epochs)):
if epoch_flag:
break
m = 0
sys.stdout.flush()
sess.run(train_init_op)
loss_list = []
loss_alpha_list = []
loss_reg_list = []
while True:
try:
l, zsl_alpha, zsl_reg, zsl_total = sess.run([new_train_op, zsl_alpha_loss, zsl_reg_loss, zsl_loss], {is_training:False})
if zsl_loss_ctr >= zsl_loss_window:
epoch_flag = True
print('Breaking out of optimization split\n\n')
break
if iteration == 1:
zsl_loss_monitor = zsl_total
else:
if (1 - (zsl_total/zsl_loss_monitor)) > float(config.eps_perc):
zsl_loss_ctr = 0
zsl_loss_monitor = zsl_total
else:
zsl_loss_ctr += 1
loss_list.append(l)
loss_alpha_list.append(zsl_alpha)
loss_reg_list.append(zsl_reg)
iteration +=1
except tf.errors.OutOfRangeError:
break
valLoss = np.mean(np.array(loss_list))
print("Epoch {}, average_training_loss_alpha: {}".format(epoch, np.mean(np.array(loss_alpha_list))))
print("Epoch {}, average_training_loss_reg : {}".format(epoch, np.mean(np.array(loss_reg_list))))
print("Epoch {}, average_training_loss: {}".format(epoch, valLoss))
# Compute accuracy
seen_seen_head_ls.append(seen_seen_head)
unseen_unseen_head = check_accuracy_normalized(sess, prediction,imclass, accuracy_unseen, is_training, val_init_op)
print("unseen head test accuracy (argmax {}): {}".format(config.n_unseen, unseen_unseen_head))
unseen_unseen_head_ls.append(unseen_unseen_head)
seen_seen_unseen_head = check_accuracy_normalized(sess, prediction_seen_unseen, imclass, accuracy_seen_unseen, is_training, test_seen_init_op)
print("seen head full test accuracy: (argmax {}): {}".format(config.n_class, seen_seen_unseen_head))
seen_seen_unseen_head_ls.append(seen_seen_unseen_head)
seen_unseen_seen_unseen_head = check_accuracy_normalized(sess, prediction_seen_unseen, imclass, accuracy_seen_unseen, is_training, test_init_op)
print("seen + unseen head full test accuracy: (argmax {}): {}".format(config.n_class, seen_unseen_seen_unseen_head))
seen_unseen_seen_unseen_head_ls.append(seen_unseen_seen_unseen_head)
unseen_seen_unseen_head = check_accuracy_normalized(sess, prediction_seen_unseen, imclass, accuracy_seen_unseen, is_training, test_unseen_init_op, verbose=False)
print("unseen head full test accuracy: (argmax {}): {}".format(config.n_class, unseen_seen_unseen_head))
unseen_seen_unseen_head_ls.append(unseen_seen_unseen_head)
# Compute Harmonic Mean of seen accuracies and unseen accuracies.
H = 2*seen_seen_unseen_head * unseen_seen_unseen_head/(seen_seen_unseen_head+ unseen_seen_unseen_head)
print("Harmonic mean", H)
hm_ls.append(H)
if valLoss <= val_loss_best:
val_loss_best = valLoss
checkpoint_dir = config.ckpt_dir + '{}_{}_{}_cnn_seen_val{}_alpha_loss_{}_d2a_model_{}_bs_{}_lr_{}_lambda_{}_epoch_{}_ssu_{:0.2f}_usu_{:0.3f}_h_{:0.3f}.ckpt'.format(config.dataset, config.model_name, config.modality, config.n_unseen, config.alpha_loss_type, config.dom2alpha_model, config.batch_size, config.learning_rate, config.reg_lambda, epoch, seen_seen_unseen_head, unseen_seen_unseen_head, H)
saver.save(sess, checkpoint_dir)
print("saved_checkpoint to {}".format(checkpoint_dir))
uhead_plotter(seen_seen_head_ls, unseen_unseen_head_ls, seen_unseen_seen_unseen_head_ls, seen_seen_unseen_head_ls, unseen_seen_unseen_head_ls, hm_ls, config.ckpt_dir, 'Normalized_Accuracy_logs')
sys.stdout.flush()
print("Optimization Done")
print("Best Checkpoint: ", checkpoint_dir)
# Script entry point: parse the command-line arguments (parser is defined at
# module level), echo them for the run log, and launch training.
if __name__ == '__main__':
args = parser.parse_args()
print(args)
main(args)
| 49,803 | 47.589268 | 419 | py |
neuron-importance-zsl | neuron-importance-zsl-master/seen_pretraining/alpha_extraction.py | """
Code to extract and save alphas from a trained network
1. Note that we're gonna have to coincide the use of previous and new finetuning dataset JSONs
2. Take bypassing into account
3. Data loader can be the same as the CNN finetuning scheme
"""
import os
import sys
import json
import codecs
import random
import importlib
# Add slim folder path to syspath
sys.path.insert(0, '/nethome/rrs6/models/research/slim')
import numpy as np
import pandas as pd
import scipy.io as scio
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
from PIL import Image
from tqdm import tqdm
from pprint import pprint
from dotmap import DotMap
from tensorflow.python import pywrap_tensorflow
# Activation bypass mapping
# Maps each custom-registered gradient name (see the @tf.RegisterGradient
# functions below) to the stock op whose gradient it overrides via
# graph.gradient_override_map.
bypass_dict = {'Max2SumPool': 'MaxPool', 'Max2SumWeighted': 'MaxPool', 'Max2SumInvWeighted': 'MaxPool', 'Identity': 'Relu'}
# Fix the module-level RNG seed so runs are reproducible.
random.seed(123)
def overlayed_maps(gcam, imgf, im_size, save_path):
    """Save the resized original image plus a jet-colormapped Grad-CAM map.

    Writes two files next to *save_path*: the original image (with an
    '_orig' suffix) and the colormapped Grad-CAM heatmap.
    """
    target = (im_size, im_size)
    # Original image, resized to the network input size.
    original = Image.open(imgf).resize(target, Image.ANTIALIAS)
    original.save(save_path.replace('.png', '_orig.png'))
    # Colorize the Grad-CAM map with the 'jet' colormap and upsample it.
    jet = mpl.cm.get_cmap('jet')
    heatmap = Image.fromarray(np.uint8(jet(gcam)*255)).resize(target, Image.ANTIALIAS)
    heatmap.save(save_path.replace('.jpg', '.png'), 'PNG')
def parse_json(json_file):
    """Return the object decoded from the JSON file at *json_file*."""
    with open(json_file, 'r') as handle:
        return json.load(handle)
def get_key(dic, val):
    """Return the first key in *dic* mapped to *val*.

    Raises ValueError (from list.index) when no entry has that value.
    """
    keys = list(dic.keys())
    values = list(dic.values())
    return keys[values.index(val)]
@tf.RegisterGradient('Max2SumPool')
def _max2sumpool(op, grad):
"""
Gradient override: backprop through MaxPool as if it were sum-pooling.

The incoming gradient is nearest-neighbor upsampled back to the pre-pool
spatial size, so every input position in a pooling window receives the
pooled gradient. Assumes square activations (dim 1 used for both height
and width) -- the original author flagged this as needing a general fix.
"""
s = int(op.inputs[0].shape[1])
return tf.image.resize_nearest_neighbor(grad, [s, s])
@tf.RegisterGradient('Max2SumWeighted')
def _max2sumw(op, grad):
"""
Weighted upsampling gradient override for MaxPool.

The gradient is nearest-neighbor upsampled to the pre-pool size and then
scaled per-position by the input activation divided by (its spatial
sum + 1), so stronger activations receive a larger share of the gradient.
"""
# Assumes square activations: dim 1 used for both output height and width.
s = int(op.inputs[0].shape[1])
temp = tf.image.resize_nearest_neighbor(grad, [s, s])
act = op.inputs[0]
# Spatial sum per (batch, channel); +1 guards against division by zero.
act_sum = tf.reduce_sum(op.inputs[0], [1, 2])
inv_act_sum = tf.reciprocal(act_sum + 1)
coeff = tf.multiply(act, inv_act_sum)
return tf.multiply(temp, coeff)
@tf.RegisterGradient('Max2SumInvWeighted')
def _max2suminvw(op, grad):
"""
Inverse-weighted upsampling gradient override for MaxPool.

Like 'Max2SumWeighted' but the per-position coefficient is proportional
to the reciprocal of the activation (normalized over the spatial extent),
so weaker activations receive a larger share of the gradient.
"""
# eps keeps the reciprocal finite for zero activations.
eps = 1e-5
# Assumes square activations: dim 1 used for both output height and width.
s = int(op.inputs[0].shape[1])
temp = tf.image.resize_nearest_neighbor(grad, [s, s])
act = op.inputs[0] + eps
inv_act = tf.reciprocal(act)
inv_act_sum = tf.reduce_sum(inv_act, [1, 2])
inv_inv_act_sum = tf.reciprocal(inv_act_sum)
coeff = tf.multiply(inv_act, inv_inv_act_sum)
return tf.multiply(temp, coeff)
def save_alphas(save_path, class_names, class_idmaps, gt_alp, pred_alp, gt_labl, pred_labl, orig_labl, img, gt_gc, pred_gc, ctr):
"""
Save the extracted alphas
"""
gt_labl = gt_labl.tolist()
pred_labl = pred_labl.tolist()
orig_labl = orig_labl.tolist()
img = img.tolist()
gt_alp = gt_alp.tolist()
pred_alp = pred_alp.tolist()
gt_gc = np.array(gt_gc)
pred_gc = np.array(pred_gc)
for i in range(len(gt_labl)):
res_dict = {}
res_dict['image'] = img[i].decode('utf-8')
print(orig_labl[i])
res_dict['gt_class'] = class_names[int(orig_labl[i])]
res_dict['pred_class'] = class_names[int(get_key(class_idmaps, pred_labl[i]))]
print('Image is: ' + img[i].decode('utf-8'))
print('Ground Truth Class is: ' + class_names[int(orig_labl[i])])
print('Predicted Class is: ' + class_names[int(get_key(class_idmaps, pred_labl[i]))])
res_dict['gt_alpha'] = gt_alp[i]
res_dict['pred_alpha'] = pred_alp[i]
res_dict['gt_cid'] = orig_labl[i]
res_dict['pred_cid'] = get_key(class_idmaps, pred_labl[i])
json.dump(res_dict, codecs.open(save_path + '/' + os.path.splitext(os.path.basename(res_dict['image']))[0] + '_' + class_names[orig_labl[i]] + '.json', 'w', encoding='utf-8'), separators=(',',':'), sort_keys=True, indent=4)
# Save limited gradcam maps
if ctr < 50:
print('Saving maps..')
gcam_save_path = save_path + '_gcam'
if not os.path.exists(gcam_save_path):
os.mkdir(gcam_save_path)
overlayed_maps(gt_gc[i], res_dict['image'], 224, gcam_save_path + '/' + os.path.splitext(os.path.basename(res_dict['image']))[0] + '_gt_' + class_names[orig_labl[i]] + '.png')
overlayed_maps(pred_gc[i], res_dict['image'], 224, gcam_save_path + '/' + os.path.splitext(os.path.basename(res_dict['image']))[0] + '_pred_' + class_names[int(get_key(class_idmaps, pred_labl[i]))] + '.png')
ctr += 1
return ctr
def prepare_data(dataset_json, class_idmaps):
    """Load dataset-split JSON(s) and attach original class ids.

    *dataset_json* is a comma-separated list of JSON paths, each holding
    [image_path, remapped_label] entries. Returns a list of
    (image_path, remapped_label, original_label) tuples, where the
    original label is recovered by reverse lookup in *class_idmaps*.
    """
    dataset = []
    for json_path in dataset_json.split(','):
        dataset += parse_json(json_path)
    print('Preparing data..')
    image_list, label_list, orig_label_list = [], [], []
    for entry in tqdm(dataset):
        image_list.append(entry[0])
        label_list.append(entry[1])
        orig_label_list.append(get_key(class_idmaps, entry[1]))
    return list(zip(image_list, label_list, orig_label_list))
def get_alphas(config_json):
"""
Build the TF1 evaluation graph and dump per-image Grad-CAM alphas.

Reads the run configuration from *config_json*, constructs the slim model,
restores weights from the configured checkpoint, then streams the dataset
once and saves, per image, the alpha vectors (channel-wise sums of the
gradients of the class score w.r.t. the chosen activation layer) for both
the ground-truth and the predicted class, via save_alphas.
"""
# Load arguments from config file
print('Reading Arguments..')
config = parse_json(config_json)
pprint(config)
config = DotMap(config)
# Initialize random seed
random.seed(int(config.random_seed))
# Number of classes
n_classes = int(config.num_classes)
# Load class-list
# Each line of the class list file is "<index> <name>"; keep the names.
class_names = [x.strip('\n').split(' ')[1] for x in open(config.class_list_f, 'r').readlines()]
# Load class-ID maps
class_idmaps = parse_json(config.class_idmaps_json)
# Prepare data
print('Loading and preparing data..')
data = prepare_data(config.dataset_json, class_idmaps)
# Extract filenames and labels
files, labels, orig_labels = map(list, zip(*data))
labels = np.array(labels).astype('int32')
orig_labels = np.array(orig_labels).astype('int32')
# Write alpha extraction here
print('Creating graph..')
graph = tf.Graph()
with graph.as_default():
# Load the preprocessing function based on argument
preprocess_module_name = 'preprocessing.' + config.preprocess_fn
preprocess_module = importlib.import_module(preprocess_module_name)
print("preprocess module", preprocess_module)
model_class = getattr(nets, config.model_class, None)
model_name = getattr(model_class, config.model_name, None)
im_size = int(config.image_size)
# Map function: decode and preprocess one image, keeping the filename so
# the saved JSONs can be named after the source image.
def preprocess(filename, label, orig_label):
image_file = tf.read_file(filename)
image = tf.image.decode_jpeg(image_file, channels=3)
processed_image = preprocess_module.preprocess_image(image, im_size, im_size, is_training=False)
return filename, processed_image, label, orig_label
# Dataset split construction
files = tf.constant(files)
labels = tf.constant(labels)
orig_labels = tf.constant(orig_labels)
dataset = tf.contrib.data.Dataset.from_tensor_slices((files, labels, orig_labels))
dataset = dataset.map(preprocess, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
batched_dataset = dataset.batch(int(config.batch_size))
# Define iterator
iterator = tf.contrib.data.Iterator.from_structure(batched_dataset.output_types, batched_dataset.output_shapes)
fname, images, labels, orig_labels = iterator.get_next()
# Dataset init ops
init_op = iterator.make_initializer(batched_dataset)
# Boolean variable for train-vs-test
is_training = tf.placeholder(tf.bool)
# (No need for is_training boolean variable; Always evaluation)
global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
# Get model arg_scope
arg_scope = getattr(model_class, config.scope, None)
with slim.arg_scope(arg_scope()):
# Optionally swap the gradients of stock ops (see bypass_dict) for the
# custom @tf.RegisterGradient overrides defined at module level.
bypass_name = None if config.bypass_name == 'None' else config.bypass_name
if bypass_name != None:
print('Bypassing.. ' + bypass_name)
bypass_keys = bypass_name.split(',')
if len(bypass_keys) == 1:
with graph.gradient_override_map({bypass_dict[bypass_keys[0]]: bypass_keys[0]}):
logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training)
else:
with graph.gradient_override_map({bypass_dict[bypass_keys[0]]: bypass_keys[0], bypass_dict[bypass_keys[1]]: bypass_keys[1]}):
logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training)
else:
logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training)
# Print activations to decide which ones to use
# print(endpoints)
# Load variables from original and new check point
# Find missing variables in new checkpoint that are present in old checkpoint
#if config.ckpt == 'old':
orig_ckpt = config.orig_ckpt_path
orig_ckpt_reader = pywrap_tensorflow.NewCheckpointReader(orig_ckpt)
new_ckpt_reader = pywrap_tensorflow.NewCheckpointReader(config.ckpt_path)
new_var_to_shape_map = new_ckpt_reader.get_variable_to_shape_map()
orig_var_to_shape_map = orig_ckpt_reader.get_variable_to_shape_map()
vars_in_orig_ckpt = [key for key in sorted(orig_var_to_shape_map)]
vars_in_new_ckpt = [key for key in sorted(new_var_to_shape_map)]
# New ckpt init function
# Restore only graph variables that actually exist in the new checkpoint.
new_ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, [x for x in tf.contrib.framework.get_variables() if x.name.split(':')[0] in vars_in_new_ckpt])
# Initializer
# var_init = tf.variables_initializer(vars_to_init + [global_step_tensor])
# # Fix variable loading from checkpoint
# # Load variables from checkpoint
# # Restore the ones from the checkpoint
# # Initialize the rest
# ckpt_reader = pywrap_tensorflow.NewCheckpointReader(config.ckpt_path)
# var_to_shape_map = ckpt_reader.get_variable_to_shape_map()
# vars_in_graph = tf.contrib.framework.get_variables()
# vars_in_ckpt = []
# for key in sorted(var_to_shape_map):
# vars_in_ckpt.append(key)
# vars_to_restore = [x for x in vars_in_graph if x.name.split(':')[0] in vars_in_ckpt]
# ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, vars_to_restore)
# vars_to_init = list(set(vars_in_graph) - set(vars_to_restore))
# var_init = tf.variables_initializer(vars_to_init + [global_step_tensor])
# Squeeze logits
logits = tf.squeeze(logits)
# Get probabilities
probs = tf.nn.softmax(logits)
predictions = tf.to_int32(tf.argmax(logits, 1))
# Compute alphas for both predicted and ground-truth classes
layer_name = config.layer_name
activations = endpoints[layer_name]
gt_one_hot = tf.one_hot(labels, n_classes, 1.0)
pred_one_hot = tf.one_hot(predictions, n_classes, 1.0)
# Masked logits: the class score whose gradient defines the alphas.
gt_loss = tf.multiply(logits, gt_one_hot)
pred_loss = tf.multiply(logits, pred_one_hot)
gt_grads = tf.gradients(gt_loss, activations)
pred_grads = tf.gradients(pred_loss, activations)
# Alphas = spatial sum of the gradients, one value per channel.
gt_alphas = tf.squeeze(tf.reduce_sum(gt_grads, [2,3]))
pred_alphas = tf.squeeze(tf.reduce_sum(pred_grads, [2,3]))
# Grad-CAM: alpha-weighted sum of the activation channels, then min-max
# normalized over the whole batch tensor.
gt_gcam = tf.reduce_sum(tf.multiply(activations, tf.reshape(gt_alphas, [tf.shape(gt_alphas)[0], 1, 1, tf.shape(gt_alphas)[1]])), axis=3)
pred_gcam = tf.reduce_sum(tf.multiply(activations, tf.reshape(pred_alphas, [tf.shape(pred_alphas)[0], 1, 1, tf.shape(pred_alphas)[1]])), axis=3)
gt_gcam = (gt_gcam - tf.reduce_min(gt_gcam)) / (tf.reduce_max(gt_gcam) - tf.reduce_min(gt_gcam))
pred_gcam = (pred_gcam - tf.reduce_min(pred_gcam)) / (tf.reduce_max(pred_gcam) - tf.reduce_min(pred_gcam))
# Initialize the filewriter
writer = tf.summary.FileWriter(config.save_path + '_summ')
# Finalize graph
tf.get_default_graph().finalize()
if not os.path.exists(config.save_path):
os.mkdir(config.save_path)
# Counter to save Grad-CAM maps for sanity checks
ctr = 0
print('Starting session to extract and save alphas..')
with tf.Session(graph=graph) as sess:
# Assign from checkpoint
# ckpt_init_fn(sess)
new_ckpt_init_fn(sess)
# Initialize variables
# sess.run(var_init)
print("Saving alpas from {} to {}".format(config.ckpt_path, config.save_path))
# Add the model graph to tensorboard
writer.add_graph(sess.graph)
sess.run(init_op)
# Single pass over the dataset; the iterator raises OutOfRangeError at the end.
while True:
try:
gt_alp, pred_alp, gt_labl, pred_labl, orig_labl, file, gt_gc, pred_gc = sess.run([gt_alphas, pred_alphas, labels, predictions, orig_labels, fname, gt_gcam, pred_gcam], {is_training: False})
# Save alphas for the batch in JSONs
ctr = save_alphas(config.save_path, class_names, class_idmaps, gt_alp, pred_alp, gt_labl, pred_labl, orig_labl, file, gt_gc, pred_gc, ctr)
except tf.errors.OutOfRangeError:
break
print("Saved alpas from {} to {}".format(config.ckpt_path, config.save_path))
| 15,568 | 46.036254 | 239 | py |
neuron-importance-zsl | neuron-importance-zsl-master/seen_pretraining/cnn_finetune.py | """
Code to finetune a given CNN on the seen-images for the seen-classes concerned datasets
(Have to add special checks to handle images with multiple frames)
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
import pdb
import json
import random
import importlib
import itertools
# Add slim folder path to syspath
sys.path.insert(0, '/nethome/rrs6/models/research/slim')
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import scipy.io as scio
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
from tensorflow.python.ops import array_ops
from tqdm import tqdm
from pprint import pprint
from dotmap import DotMap
from random import shuffle
# Load JSON
def parse_json(json_file):
    """Read *json_file* and return its parsed contents."""
    with open(json_file, 'r') as fp:
        return json.load(fp)
# Save JSON
def save_json(data, json_file):
    """Serialize *data* as JSON into *json_file*, overwriting any existing file."""
    with open(json_file, 'w') as fp:
        json.dump(data, fp)
# Plot losses/accuracy on train-val splits
def plot_metrics(train_metric, val_metric, title, save_fname):
    """Plot train/validation curves per epoch and write the figure to *save_fname*."""
    fig = plt.figure()
    axis = fig.add_subplot(111)
    # One point per epoch for each split.
    axis.plot(range(len(train_metric)), train_metric, label='Train Split')
    axis.plot(range(len(val_metric)), val_metric, label='Validation Split')
    plt.legend()
    plt.title(title)
    fig.savefig(save_fname)
    # Close explicitly so repeated calls do not accumulate open figures.
    plt.close(fig)
# Generate hyper-params for grid sweep
# (Do not mess with dropout)
def generate_hyperparameters(config):
    """Expand 'low,high' exponent specs from the config into powers-of-ten grids.

    Each of ``config.lr_fc``, ``config.lr_full`` and ``config.wt_dec`` is a
    string "low,high" of integer exponents; the returned dict maps each name
    to the list [10**low, ..., 10**high] (inclusive).
    """
    def _decade_grid(spec):
        # Parse the two integer exponents and enumerate the inclusive range.
        parts = spec.split(',')
        low, high = int(parts[0]), int(parts[1])
        return [10 ** exp for exp in range(low, high + 1)]
    return {
        "lr_fc": _decade_grid(config.lr_fc),
        "lr_full": _decade_grid(config.lr_full),
        "wt_dec": _decade_grid(config.wt_dec),
    }
# Return trial-based accuracy (for both loss and accuracy)
def check_metrics(sess, prediction, labels, loss, is_training, dataset_init_op, trials=10, verbose=False):
    """Evaluate one split and report bootstrap mean/std of loss and macro accuracy.

    Runs the dataset once (until OutOfRange), collecting per-sample predictions
    and labels plus per-batch losses, then repeatedly (``trials`` times)
    samples 80% of the sample indices and averages the metrics.

    Returns
    -------
    (loss_mean, loss_std, acc_mean, acc_std) where accuracy is the mean of
    per-class accuracies over the sampled subset.
    """
    sess.run(dataset_init_op)
    whole_loss_list, whole_pred_list, whole_label_list = [], [], []
    while True:
        try:
            preds, labl, los = sess.run([prediction, labels, loss], {is_training: False})
            if verbose:
                print(preds.shape)
            # NOTE(review): `los` is one scalar per *batch* while preds/labels
            # are per-sample, so the index-based loss sampling below mixes
            # granularities -- preserved as-is; confirm intent before changing.
            whole_loss_list.append(los)
            whole_pred_list += preds.tolist()
            whole_label_list += labl.tolist()
        except tf.errors.OutOfRangeError:
            break
    # Sample-based on number of trials; assume default split 0.8.
    all_indices = list(range(len(whole_label_list)))
    num_samples = int(0.8 * len(all_indices))
    trial_loss_res = []
    trial_acc_res = []
    for _ in range(trials):
        shuffle(all_indices)
        # Use a set for O(1) membership tests: the original kept a list,
        # making each comprehension below accidentally O(n^2).
        sample_indices = set(all_indices[:num_samples])
        pred_sample = [x for i, x in enumerate(whole_pred_list) if i in sample_indices]
        label_sample = [x for i, x in enumerate(whole_label_list) if i in sample_indices]
        loss_sample = [x for i, x in enumerate(whole_loss_list) if i in sample_indices]
        # Macro-average accuracy: mean of the per-class accuracies.
        unique_labl = list(set(label_sample))
        per_cls_acc = []
        for y in unique_labl:
            gt_ind = [i for i, x in enumerate(label_sample) if x == y]
            cls_acc = float([pred_sample[i] for i in gt_ind].count(y) / len(gt_ind))
            per_cls_acc.append(cls_acc)
        trial_loss_res.append(np.mean(loss_sample))
        trial_acc_res.append(np.mean(per_cls_acc))
    # Return results averaged over trials.
    return np.mean(trial_loss_res), np.std(trial_loss_res), np.mean(trial_acc_res), np.std(trial_acc_res)
# Function to load data-splits
def load_data(train_split_json, val_split_json, test_split_json):
    """Load the train/val/test split JSON files.

    Parameters are paths to JSON files, each expected to hold a list of
    (filename, label) pairs.

    Raises
    ------
    FileNotFoundError
        If any split file is missing. (The original code only printed a
        message and then crashed with UnboundLocalError at the return.)
    """
    splits = []
    for split_name, path in (('Train', train_split_json),
                             ('Val', val_split_json),
                             ('Test', test_split_json)):
        if os.path.isfile(path):
            print('%s split file exists..' % split_name)
            splits.append(parse_json(path))
        else:
            print('%s split file does not exist..' % split_name)
            raise FileNotFoundError('%s split file not found: %s' % (split_name, path))
    return splits[0], splits[1], splits[2]
# Load arguments from config and run training
# Have multiple runs over hyper-parameters to make sure correct ones are selected
def run_training(config, learning_rate_fc, learning_rate_full, weight_decay, batch_size):
    """Fine-tune the CNN for a single hyper-parameter setting.

    Two-phase schedule:
      1. Train only the final classification layer for ``config.num_epochs_fc``
         epochs with Adam at ``learning_rate_fc``.
      2. If ``config.finetune_whole_cnn`` == 1, restore the best phase-1
         checkpoint and fine-tune the whole network for
         ``config.num_epochs_full`` epochs at ``learning_rate_full``.
    Both phases early-stop on validation loss. Checkpoints, loss/accuracy
    plots and test-set metrics are written to a per-setting subdirectory of
    ``config.save_path``.
    """
    assert(config.finetune_whole_cnn!=None)
    # Per-setting subdirectory name encodes this run's hyper-parameters
    ckpt_suffix = 'setting_lrfc_' + str(learning_rate_fc) + '_lrfull_' + str(learning_rate_full) + '_wd_' + str(weight_decay) + '_bsz_' + str(batch_size)
    # Initialize random seed
    random.seed(int(config.random_seed))
    # Number of classes to fine-tune CNN on
    n_classes = int(config.num_classes)
    # Load dataset splits
    train_split, val_split, test_split = load_data(config.train_split_json, config.val_split_json, config.test_split_json)
    # Extract filenames and labels
    train_files, train_labels = map(list, zip(*train_split))
    val_files, val_labels = map(list, zip(*val_split))
    test_files, test_labels = map(list, zip(*test_split))
    print('#train-instances: %d' % len(train_split))
    print('#val-instances: %d' % len(val_split))
    print('#test-instances: %d' % len(test_split))
    # Type-cast labels
    train_labels = np.array(train_labels).astype('int32')
    val_labels = np.array(val_labels).astype('int32')
    test_labels = np.array(test_labels).astype('int32')
    save_dir = config.save_path + ckpt_suffix + '/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # Write model graph definition
    print('Creating Graph..')
    graph = tf.Graph()
    with graph.as_default():
        # Load the preprocessing function based on argument
        preprocess_module_name = 'preprocessing.' + config.preprocess_fn
        preprocess_module = importlib.import_module(preprocess_module_name)
        model_class = getattr(nets, config.model_class, None)
        model_name = getattr(model_class, config.model_name, None)
        im_size = int(config.image_size)
        def train_preprocess(filename, label):
            # Decode + model-specific training augmentation
            image_file = tf.read_file(filename)
            image = tf.image.decode_jpeg(image_file, channels=3)
            processed_image = preprocess_module.preprocess_image(image, im_size, im_size, is_training=True)
            return processed_image, label
        def test_preprocess(filename, label):
            # Decode + deterministic eval preprocessing
            image_file = tf.read_file(filename)
            image = tf.image.decode_image(image_file, channels=3)
            processed_image = preprocess_module.preprocess_image(image, im_size, im_size, is_training=False)
            return processed_image, label
        # Contrib dataset creation
        # Training Split
        train_files = tf.constant(train_files)
        train_labels = tf.constant(train_labels)
        train_dataset = tf.contrib.data.Dataset.from_tensor_slices((train_files, train_labels))
        train_dataset = train_dataset.map(train_preprocess, num_threads=int(config.num_workers), output_buffer_size=batch_size)
        train_dataset = train_dataset.shuffle(buffer_size=10000)
        batched_train_dataset = train_dataset.batch(batch_size)
        # Validation Split
        val_files = tf.constant(val_files)
        val_labels = tf.constant(val_labels)
        val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_files, val_labels))
        val_dataset = val_dataset.map(test_preprocess, num_threads=int(config.num_workers), output_buffer_size=batch_size)
        batched_val_dataset = val_dataset.batch(batch_size)
        # Test Split
        test_files = tf.constant(test_files)
        test_labels = tf.constant(test_labels)
        test_dataset = tf.contrib.data.Dataset.from_tensor_slices((test_files, test_labels))
        test_dataset = test_dataset.map(test_preprocess, num_threads=int(config.num_workers), output_buffer_size=batch_size)
        batched_test_dataset = test_dataset.batch(batch_size)
        # One reinitializable iterator shared by all three splits
        iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes)
        images, labels = iterator.get_next()
        # Dataset init ops
        train_init_op = iterator.make_initializer(batched_train_dataset)
        val_init_op = iterator.make_initializer(batched_val_dataset)
        test_init_op = iterator.make_initializer(batched_test_dataset)
        # Boolean variable for train-vs-test
        is_training = tf.placeholder(tf.bool)
        # Define the global step to be some tf.Variable
        global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
        arg_scope = getattr(model_class, config.scope, None)
        # Get model arg-scope; resnet logits come out [N,1,1,C] and need squeezing
        if 'resnet' in config.model_class:
            with slim.arg_scope(arg_scope(weight_decay=float(weight_decay))):
                logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training)
                logits = array_ops.squeeze(logits, [1,2])
        else:
            with slim.arg_scope(arg_scope(weight_decay=float(weight_decay))):
                logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training, dropout_keep_prob=float(config.dropout))
        # Check for checkpoint-path
        assert(os.path.isfile(config.ckpt_path))
        # Restore everything except the (freshly trained) final layer and global step
        vars_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=[config.model_name + '/' + config.layer_name, 'global_step'])
        ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, vars_to_restore)
        # Evaluation metrics
        prediction = tf.to_int32(tf.argmax(logits, 1))
        correct_prediction = tf.equal(prediction, labels)
        # Accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Define loss-criterion
        loss_ce = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        # Add regularizers
        loss_reg = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])*weight_decay
        loss = loss_ce + loss_reg
        # Define 2 optimization ops:
        # 1. To train the last layer for certain #epochs
        last_optimizer = tf.train.AdamOptimizer(learning_rate_fc)
        last_vars = tf.contrib.framework.get_variables(config.model_name + '/' + config.layer_name)
        last_train_op = tf.contrib.slim.learning.create_train_op(loss, last_optimizer, variables_to_train=last_vars)
        # (Call tf.contrib.framework.get_variables() again after declaring optimizer)
        last_init = tf.variables_initializer(tf.contrib.framework.get_variables(config.model_name + '/' + config.layer_name) + \
                    [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name] +\
                    [global_step_tensor])
        # 2. To train the whole network for certain (follow-up) #epochs
        full_optimizer = tf.train.AdamOptimizer(learning_rate_full)
        full_train_op = tf.contrib.slim.learning.create_train_op(loss, full_optimizer)
        full_vars = tf.contrib.framework.get_variables()
        full_init = tf.variables_initializer([x for x in full_vars if ('Adam' in x.name)] + \
                    [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if ('beta' in x.name)])
        # Log summaries
        for var in last_vars:
            tf.summary.histogram(var.name, var)
        tf.summary.scalar('Loss', loss)
        tf.summary.scalar('Accuracy', accuracy)
        merged_summary = tf.summary.merge_all()
        # Initialize the filewriter
        writer = tf.summary.FileWriter(save_dir + '_filewriter')
        # Save trainables plus batch-norm moving statistics
        saver = tf.train.Saver(tf.trainable_variables() + [x for x in tf.contrib.framework.get_variables() if 'moving_mean' in x.name or 'moving_var' in x.name])
        tf.get_default_graph().finalize()
    # Define variables to plot and store summaries
    tr_loss = []
    vl_loss = []
    tr_acc = []
    vl_acc = []
    iteration = 1
    ckpt_tracker = 0
    es_fc = int(config.estop_fc)
    es_full = int(config.estop_full)
    val_monitor_fc = 9999
    val_monitor_full = 9999
    ctr_fc = 0
    ctr_full = 0
    inter_epoch = 0
    with tf.Session(graph=graph) as sess:
        ckpt_init_fn(sess)
        # Add the model graph to tensorboard
        writer.add_graph(sess.graph)
        # Train last fc-layer
        sess.run(last_init)
        tf.train.global_step(sess, global_step_tensor)
        print('Training the last layer now..')
        for epoch in range(int(config.num_epochs_fc)):
            if ctr_fc >= es_fc:
                break
            # Run an epoch over the training data
            print('Epoch %d/%d' % (epoch+1, int(config.num_epochs_fc)))
            sess.run(train_init_op)
            while True:
                try:
                    loss_fc, s, logits_val = sess.run([last_train_op, merged_summary, logits], {is_training: True})
                    if iteration%100==0:
                        print('Iteration %d Training Loss: %f' % (iteration, loss_fc))
                    iteration += 1
                    writer.add_summary(s, iteration)
                except tf.errors.OutOfRangeError:
                    break
            # Check metrics on train and validation sets
            train_loss, train_loss_std, train_acc, train_acc_std = check_metrics(sess, prediction, labels, loss, is_training, train_init_op)
            val_loss, val_loss_std, val_acc, val_acc_std = check_metrics(sess, prediction, labels, loss, is_training, val_init_op)
            print('Epoch %d Training Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, train_loss, train_loss_std, train_acc, train_acc_std))
            print('Epoch %d Validation Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, val_loss, val_loss_std, val_acc, val_acc_std))
            # Test-split performance
            test_loss, test_loss_std, test_acc, test_acc_std = check_metrics(sess, prediction, labels, loss, is_training, test_init_op)
            print('Epoch %d Test Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, test_loss, test_loss_std, test_acc, test_acc_std))
            test_set_perf = { 'Loss': str(test_loss) + '+/-' + str(test_loss_std), 'Accuracy': str(test_acc) + '+/-' + str(test_acc_std)}
            save_json(test_set_perf, save_dir + 'test_perf.json')
            tr_loss.append(train_loss)
            vl_loss.append(val_loss)
            tr_acc.append(train_acc)
            vl_acc.append(val_acc)
            # Early-stopping monitor: checkpoint only on validation-loss improvement
            if val_loss < val_monitor_fc:
                val_monitor_fc = val_loss
                ctr_fc = 0
                print('Saving checkpoint...', save_dir+'last_layer_ckpt-'+str(iteration))
                saver.save(sess=sess, save_path=save_dir + 'last_layer_ckpt', global_step=iteration)
                ckpt_tracker = iteration
            else:
                ctr_fc += 1
            plot_metrics(tr_loss, vl_loss, 'Cross_Entropy_Loss', save_dir + 'loss_log.png')
            plot_metrics(tr_acc, vl_acc, 'Accuracy', save_dir + 'accuracy_log.png')
            inter_epoch = epoch
        print("Best checkpoint", ckpt_tracker)
        if int(config.finetune_whole_cnn) == 1:
            # Restore previously (best) checkpoint.
            # Fix: guard the restore -- ckpt_tracker stays 0 when validation
            # loss never improved, and restoring 'last_layer_ckpt-0' would fail.
            if ckpt_tracker:
                saver.restore(sess, save_dir + 'last_layer_ckpt-' + str(ckpt_tracker))
            # Train the entire CNN
            sess.run(full_init)
            print('Fine-tuning the whole network')
            for epoch in range(int(config.num_epochs_full)):
                if ctr_full >= es_full:
                    break
                # Run an epoch over the training data
                # (fix: the epoch total shown here used num_epochs_fc by mistake)
                print('Epoch %d/%d' % (epoch+inter_epoch, int(config.num_epochs_full)))
                sess.run(train_init_op)
                while True:
                    try:
                        loss_full, s = sess.run([full_train_op, merged_summary], {is_training: True})
                        if iteration%100==0:
                            print('Iteration %d Training Loss: %f' % (iteration, loss_full))
                        iteration += 1
                        writer.add_summary(s, iteration)
                    except tf.errors.OutOfRangeError:
                        break
                # Check metrics on train and validation sets
                train_loss, train_loss_std, train_acc, train_acc_std = check_metrics(sess, prediction, labels, loss, is_training, train_init_op)
                val_loss, val_loss_std, val_acc, val_acc_std = check_metrics(sess, prediction, labels, loss, is_training, val_init_op)
                print('Epoch %d Training Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, train_loss, train_loss_std, train_acc, train_acc_std))
                print('Epoch %d Validation Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, val_loss, val_loss_std, val_acc, val_acc_std))
                # Test-split performance
                test_loss, test_loss_std, test_acc, test_acc_std = check_metrics(sess, prediction, labels, loss, is_training, test_init_op)
                print('Epoch %d Test Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, test_loss, test_loss_std, test_acc, test_acc_std))
                tr_loss.append(train_loss)
                vl_loss.append(val_loss)
                tr_acc.append(train_acc)
                vl_acc.append(val_acc)
                # Checkpoint + record test metrics only on validation improvement
                if val_loss < val_monitor_full:
                    val_monitor_full = val_loss
                    ctr_full = 0
                    print('Saving checkpoint...', save_dir +'ckpt-'+str(iteration))
                    saver.save(sess=sess, save_path=save_dir + 'ckpt', global_step=iteration)
                    test_set_perf = { 'Loss': str(test_loss) + '+/-' + str(test_loss_std), 'Accuracy': str(test_acc) + '+/-' + str(test_acc_std)}
                    save_json(test_set_perf, save_dir + 'test_perf.json')
                else:
                    ctr_full += 1
                plot_metrics(tr_loss, vl_loss, 'Cross_Entropy_Loss', save_dir + 'loss_log.png')
                plot_metrics(tr_acc, vl_acc, 'Accuracy', save_dir + 'accuracy_log.png')
# Load arguments from config and run training
def run_training_no_search(config_json):
    """Train the CNN with the single hyper-parameter setting in ``config_json``.

    Same two-phase schedule as ``run_training`` (last layer first, then
    optionally the whole network), but learning rates / weight decay come
    directly from the config instead of a grid sweep, preprocessing is a
    simple resize + mean subtraction, and gradients are clipped to norm 4.0.
    Checkpoints and plots are written under ``config.save_path``.
    """
    config = parse_json(config_json)
    print('Training Script Arguments..')
    pprint(config)
    config = DotMap(config)
    # Initialize random seed
    random.seed(int(config.random_seed))
    # Number of classes to fine-tune CNN on
    n_classes = int(config.num_classes)
    # Load dataset splits
    train_split, val_split, test_split = load_data(config.train_split_json, config.val_split_json, config.test_split_json)
    # Extract filenames and labels
    train_files, train_labels = map(list, zip(*train_split))
    val_files, val_labels = map(list, zip(*val_split))
    test_files, test_labels = map(list, zip(*test_split))
    print('#train-instances: %d' % len(train_split))
    print('#val-instances: %d' % len(val_split))
    print('#test-instances: %d' % len(test_split))
    # Type-cast labels
    train_labels = np.array(train_labels).astype('int32')
    val_labels = np.array(val_labels).astype('int32')
    test_labels = np.array(test_labels).astype('int32')
    save_dir = config.save_path + '/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # Write model graph definition
    print('Creating Graph..')
    graph = tf.Graph()
    with graph.as_default():
        model_class = getattr(nets, config.model_class, None)
        model_name = getattr(model_class, config.model_name, None)
        im_size = int(config.image_size)
        # Per-channel means subtracted during preprocessing
        MEAN = [float(config.c1_mean), float(config.c2_mean), float(config.c3_mean)]
        def _parse_function(filename, label):
            # Decode a JPEG and resize to the model's input size
            image_file = tf.read_file(filename)
            image_decoded = tf.image.decode_jpeg(image_file, channels=3)
            image = tf.cast(image_decoded, tf.float32)
            image_resized = tf.image.resize_images(image, [im_size, im_size])
            return image_resized, label
        def preprocess(image, label):
            # Mean-subtract each channel
            means = tf.reshape(tf.constant(MEAN), [1, 1, 3])
            processed_image = image - means
            return processed_image, label
        # Contrib dataset creation
        # Training Split
        train_files = tf.constant(train_files)
        train_labels = tf.constant(train_labels)
        train_dataset = tf.contrib.data.Dataset.from_tensor_slices((train_files, train_labels))
        train_dataset = train_dataset.map(_parse_function, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
        train_dataset = train_dataset.map(preprocess, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
        train_dataset = train_dataset.shuffle(buffer_size=10000)
        batched_train_dataset = train_dataset.batch(int(config.batch_size))
        # Validation Split
        val_files = tf.constant(val_files)
        val_labels = tf.constant(val_labels)
        val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_files, val_labels))
        val_dataset = val_dataset.map(_parse_function, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
        val_dataset = val_dataset.map(preprocess, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
        batched_val_dataset = val_dataset.batch(int(config.batch_size))
        # Test Split
        test_files = tf.constant(test_files)
        test_labels = tf.constant(test_labels)
        test_dataset = tf.contrib.data.Dataset.from_tensor_slices((test_files, test_labels))
        test_dataset = test_dataset.map(_parse_function, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
        test_dataset = test_dataset.map(preprocess, num_threads=int(config.num_workers), output_buffer_size=int(config.batch_size))
        batched_test_dataset = test_dataset.batch(int(config.batch_size))
        # One reinitializable iterator shared by all three splits
        iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes)
        images, labels = iterator.get_next()
        # Dataset init ops
        train_init_op = iterator.make_initializer(batched_train_dataset)
        val_init_op = iterator.make_initializer(batched_val_dataset)
        test_init_op = iterator.make_initializer(batched_test_dataset)
        # Boolean variable for train-vs-test
        is_training = tf.placeholder(tf.bool)
        # Define the global step to be some tf.Variable
        global_step_tensor = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
        # Get model arg-scope; resnet logits come out [N,1,1,C] and need squeezing
        arg_scope = getattr(model_class, config.scope, None)
        print(arg_scope)
        if 'resnet' in config.model_class:
            with slim.arg_scope(arg_scope(weight_decay=float(config.wt_dec))):
                logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training)
                logits = array_ops.squeeze(logits, [1,2])
        else:
            with slim.arg_scope(arg_scope(weight_decay=float(config.wt_dec))):
                logits, endpoints = model_name(images, num_classes=n_classes, is_training=is_training, dropout_keep_prob=float(config.dropout))
        # Restore everything except the (freshly trained) final layer and global step
        vars_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=[config.model_name + '/' + config.layer_name, 'global_step'])
        ckpt_init_fn = tf.contrib.framework.assign_from_checkpoint_fn(config.ckpt_path, vars_to_restore)
        # Define loss-criterion
        loss_ce = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        # Add regularizers
        loss_reg = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])*float(config.wt_dec)
        loss = loss_ce + loss_reg
        # Define 2 optimization ops:
        # 1. To train the last layer for certain #epochs
        last_optimizer = tf.train.AdamOptimizer(float(config.lr_fc))
        last_vars = tf.contrib.framework.get_variables(config.model_name + '/' + config.layer_name)
        last_train_op = tf.contrib.slim.learning.create_train_op(loss, last_optimizer, variables_to_train=last_vars, clip_gradient_norm=4.0)
        # (Call tf.contrib.framework.get_variables() again after declaring optimizer)
        last_init = tf.variables_initializer(tf.contrib.framework.get_variables(config.model_name + '/' + config.layer_name) + \
                    [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'beta' in x.name] +\
                    [global_step_tensor])
        # 2. To train the whole network for certain (follow-up) #epochs
        full_optimizer = tf.train.AdamOptimizer(float(config.lr_full))
        full_train_op = tf.contrib.slim.learning.create_train_op(loss, full_optimizer, clip_gradient_norm=4.0)
        full_vars = tf.contrib.framework.get_variables()
        full_init = tf.variables_initializer([x for x in full_vars if ('Adam' in x.name)] + \
                    [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if ('beta' in x.name)])
        # Evaluation metrics
        logits_argmax = tf.argmax(logits, 1)
        prediction = tf.to_int32(logits_argmax)
        correct_prediction = tf.equal(prediction, labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Log summaries
        for var in last_vars:
            tf.summary.histogram(var.name, var)
        tf.summary.scalar('Loss', loss)
        tf.summary.scalar('Accuracy', accuracy)
        merged_summary = tf.summary.merge_all()
        # Initialize the filewriter
        writer = tf.summary.FileWriter(config.save_path + 'filewriter')
        # Save trainables plus batch-norm moving statistics
        saver = tf.train.Saver(tf.trainable_variables() + [x for x in tf.contrib.framework.get_variables() if 'moving_mean' in x.name or 'moving_var' in x.name])
        tf.get_default_graph().finalize()
    if not os.path.exists(config.save_path):
        os.mkdir(config.save_path)
    # Define variables to plot and store summaries
    tr_loss = []
    vl_loss = []
    tr_acc = []
    vl_acc = []
    iteration = 1
    # Fix: ckpt_tracker must be initialized -- if validation loss never
    # improves, the "best checkpoint" print below raised NameError.
    ckpt_tracker = 0
    es_fc = int(config.estop_fc)
    es_full = int(config.estop_full)
    val_monitor_fc = 9999
    val_monitor_full = 9999
    ctr_fc = 0
    ctr_full = 0
    inter_epoch = 0
    with tf.Session(graph=graph) as sess:
        ckpt_init_fn(sess)
        # Add the model graph to tensorboard
        writer.add_graph(sess.graph)
        # Train last fc-layer
        sess.run(last_init)
        tf.train.global_step(sess, global_step_tensor)
        print('Training the last layer (no search)..')
        for epoch in range(int(config.num_epochs_fc)):
            if ctr_fc >= es_fc:
                break
            # Run an epoch over the training data
            print('Epoch %d/%d' % (epoch+1, int(config.num_epochs_fc)))
            sess.run(train_init_op)
            while True:
                try:
                    loss_fc, s, logits_val, prediction_val = sess.run([last_train_op, merged_summary, logits_argmax, prediction], {is_training: True})
                    if iteration%50==0:
                        print('Iteration %d Training Loss: %f' % (iteration, loss_fc))
                    iteration += 1
                    writer.add_summary(s, iteration)
                except tf.errors.OutOfRangeError:
                    break
            # Check metrics on train and validation sets
            train_loss, train_loss_std, train_acc, train_acc_std = check_metrics(sess, prediction, labels, loss, is_training, train_init_op)
            val_loss, val_loss_std, val_acc, val_acc_std = check_metrics(sess, prediction, labels, loss, is_training, val_init_op)
            print('Epoch %d Training Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, train_loss, train_loss_std, train_acc, train_acc_std))
            print('Epoch %d Validation Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, val_loss, val_loss_std, val_acc, val_acc_std))
            tr_loss.append(train_loss)
            vl_loss.append(val_loss)
            tr_acc.append(train_acc)
            vl_acc.append(val_acc)
            # Early-stopping monitor: checkpoint only on validation-loss improvement
            if val_loss < val_monitor_fc:
                val_monitor_fc = val_loss
                ctr_fc = 0
                print('Saving checkpoint...', config.save_path+'ckpt-'+str(iteration))
                saver.save(sess=sess, save_path=config.save_path + 'ckpt', global_step=iteration)
                ckpt_tracker = iteration
            else:
                ctr_fc += 1
            plot_metrics(tr_loss, vl_loss, 'Cross_Entropy_Loss', config.save_path + 'loss_log.png')
            plot_metrics(tr_acc, vl_acc, 'Accuracy', config.save_path + 'accuracy_log.png')
            inter_epoch = epoch
        print("best checkpoint", ckpt_tracker)
        if int(config.finetune_whole_cnn) == 1:
            # Restore the best phase-1 checkpoint.
            # Fix: checkpoints above are saved under the ``config.save_path + 'ckpt'``
            # prefix, but the restore used ``save_dir + 'ckpt-'`` -- a different
            # path whenever config.save_path does not end in '/'. Use the same
            # prefix, and skip the restore when nothing was ever saved.
            if ckpt_tracker:
                saver.restore(sess, config.save_path + 'ckpt-' + str(ckpt_tracker))
            # Train the entire CNN
            sess.run(full_init)
            print('Fine-tuning the whole network')
            for epoch in range(int(config.num_epochs_full)):
                if ctr_full >= es_full:
                    break
                # Run an epoch over the training data
                # (fix: the epoch total shown here used num_epochs_fc by mistake)
                print('Epoch %d/%d' % (epoch+inter_epoch, int(config.num_epochs_full)))
                sess.run(train_init_op)
                while True:
                    try:
                        loss_full, s = sess.run([full_train_op, merged_summary], {is_training: True})
                        if iteration%50==0:
                            print('Iteration %d Training Loss: %f' % (iteration, loss_full))
                        iteration += 1
                        writer.add_summary(s, iteration)
                    except tf.errors.OutOfRangeError:
                        break
                # Check metrics on train and validation sets
                train_loss, train_loss_std, train_acc, train_acc_std = check_metrics(sess, prediction, labels, loss, is_training, train_init_op)
                val_loss, val_loss_std, val_acc, val_acc_std = check_metrics(sess, prediction, labels, loss, is_training, val_init_op)
                print('Epoch %d Training Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, train_loss, train_loss_std, train_acc, train_acc_std))
                print('Epoch %d Validation Loss %f +/- %f Accuracy %f +/- %f' % (epoch + 1, val_loss, val_loss_std, val_acc, val_acc_std))
                tr_loss.append(train_loss)
                vl_loss.append(val_loss)
                tr_acc.append(train_acc)
                vl_acc.append(val_acc)
                # Checkpoint only on validation improvement
                if val_loss < val_monitor_full:
                    val_monitor_full = val_loss
                    ctr_full = 0
                    print('Saving checkpoint...', config.save_path+'ckpt-'+str(iteration))
                    # Fix: save to the same prefix the message advertises
                    # (previously saved to save_dir + 'ckpt' while printing config.save_path).
                    saver.save(sess=sess, save_path=config.save_path + 'ckpt', global_step=iteration)
                else:
                    ctr_full += 1
                plot_metrics(tr_loss, vl_loss, 'Cross_Entropy_Loss', save_dir + 'loss_log.png')
                plot_metrics(tr_acc, vl_acc, 'Accuracy', save_dir + 'accuracy_log.png')
def validate_and_train(config_json):
    """Sweep the hyper-parameter grid from the config, launching one training run per setting."""
    config = parse_json(config_json)
    print('Training Script Arguments..')
    pprint(config)
    config = DotMap(config)
    # Create directory to save all settings
    if not os.path.exists(config.save_path):
        os.mkdir(config.save_path)
    # Expand the per-parameter value lists from the config
    hyper_dict = DotMap(generate_hyperparameters(config))
    print(hyper_dict)
    # Cartesian product over (lr_fc, lr_full, wt_dec)
    sweep_axes = [hyper_dict.lr_fc, hyper_dict.lr_full, hyper_dict.wt_dec]
    hparam_comb = list(itertools.product(*sweep_axes))
    print("hparam_comb")
    pprint(hparam_comb)
    for idx, (lr_fc, lr_full, wt_dec) in enumerate(hparam_comb):
        print('Running with settings..')
        print('-------------------------------')
        print('Setting {} of {}'.format(idx, len(hparam_comb)))
        pprint({'lr_fc': lr_fc, 'lr_full': lr_full, 'wt_dec': wt_dec})
        print('-------------------------------')
        run_training(config, lr_fc, lr_full, wt_dec, int(config.batch_size))
        print('-------------------------------')
        print('-------------------------------')
        print('-------------------------------')
| 40,175 | 54.877608 | 173 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/movielens/code/model.py | '''
Tensorflow implementation of AutoInt described in:
AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks.
author: Chence Shi
email: chenceshi@pku.edu.cn
'''
import os
import numpy as np
import tensorflow as tf
from time import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score, log_loss
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
'''
The following two functions are adapted from kyubyong park's implementation of transformer
We slightly modify the code to make it suitable for our work.(add relu, delete key masking and causality mask)
June 2017 by kyubyong park.
kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/transformer
'''
def normalize(inputs, epsilon=1e-8):
    '''
    Applies layer normalization over the last axis.
    Args:
        inputs: A tensor with 2 or more dimensions
        epsilon: A floating number to prevent Zero Division
    Returns:
        A tensor with the same shape and data dtype
    '''
    # Learnable per-feature shift (beta) and scale (gamma).
    params_shape = inputs.get_shape()[-1:]
    # Per-sample statistics along the last axis.
    mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
    beta = tf.Variable(tf.zeros(params_shape))
    gamma = tf.Variable(tf.ones(params_shape))
    standardized = (inputs - mean) / ((variance + epsilon) ** (.5))
    return gamma * standardized + beta
def multihead_attention(queries,
                        keys,
                        values,
                        num_units=None,
                        num_heads=1,
                        dropout_keep_prob=1,
                        is_training=True,
                        has_residual=True):
    """Scaled dot-product multi-head attention (simplified Transformer block).

    Args:
        queries, keys, values: 3-D tensors; attention is computed over axis 1.
        num_units: total projection size; defaults to the last dim of queries.
        num_heads: number of attention heads (num_units is split across them).
        dropout_keep_prob: keep probability for the attention weights.
        is_training: enables dropout when True.
        has_residual: add a ReLU-projected residual of ``values`` to the output.

    Returns:
        A layer-normalized tensor with last dimension ``num_units``.
    """
    if num_units is None:
        # Fix: ``as_list`` is a method -- the original ``as_list[-1]`` indexed
        # the bound-method object and raised TypeError at graph-build time.
        num_units = queries.get_shape().as_list()[-1]
    # Linear projections (with ReLU, as in the original implementation)
    Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
    K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
    V = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    if has_residual:
        V_res = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    # Split into heads and fold them into the batch dimension
    Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)
    K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)
    V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)
    # Dot-product attention scores
    weights = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))
    # Scale by sqrt(d_k)
    weights = weights / (K_.get_shape().as_list()[-1] ** 0.5)
    # Normalize to a distribution over keys
    weights = tf.nn.softmax(weights)
    # Dropout on the attention weights
    weights = tf.layers.dropout(weights, rate=1-dropout_keep_prob,
                                training=tf.convert_to_tensor(is_training))
    # Weighted sum over values
    outputs = tf.matmul(weights, V_)
    # Restore the head dimension
    outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)
    # Residual connection
    if has_residual:
        outputs += V_res
    outputs = tf.nn.relu(outputs)
    # Layer-normalize
    outputs = normalize(outputs)
    return outputs
class AutoInt():
    """AutoInt CTR model — MovieLens variant with one multi-value genre field.

    M-1 single-value fields plus one 6-slot multi-value genre field are
    embedded and concatenated to a (batch, M, d) tensor, passed through
    `blocks` multi-head self-attention layers, flattened and projected to a
    single logit. Optionally joint-trained with a plain feed-forward DNN
    (`deep_layers`), whose logit is added to the attention logit.
    """
    def __init__(self, args, feature_size, run_cnt):
        """Store hyper-parameters from `args` and build the TF graph.

        Args:
            args: parsed command-line namespace (see train.parse_args).
            feature_size: n, total number of distinct feature ids.
            run_cnt: index of the current run; selects the checkpoint dir.
        """
        self.feature_size = feature_size # denote as n, dimension of concatenated features
        self.field_size = args.field_size # denote as M, number of total feature fields
        self.embedding_size = args.embedding_size # denote as d, size of the feature embedding
        self.blocks = args.blocks # number of the blocks
        self.heads = args.heads # number of the heads
        self.block_shape = args.block_shape
        self.output_size = args.block_shape[-1]
        self.has_residual = args.has_residual
        self.deep_layers = args.deep_layers # whether to joint train with deep networks as described in paper
        self.batch_norm = args.batch_norm
        self.batch_norm_decay = args.batch_norm_decay
        self.drop_keep_prob = args.dropout_keep_prob
        self.l2_reg = args.l2_reg
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.learning_rate = args.learning_rate
        self.optimizer_type = args.optimizer_type
        # per-run checkpoint directory, e.g. ./model/1/
        self.save_path = args.save_path + str(run_cnt) + '/'
        self.is_save = args.is_save
        if (args.is_save == True and os.path.exists(self.save_path) == False):
            os.makedirs(self.save_path)
        self.verbose = args.verbose
        self.random_seed = args.random_seed
        self.loss_type = args.loss_type
        self.eval_metric = roc_auc_score
        # best validation log-loss seen so far (lower is better)
        self.best_loss = 1.0
        self.greater_is_better = args.greater_is_better
        self.train_result, self.valid_result = [], []
        self.train_loss, self.valid_loss = [], []
        self._init_graph()

    def _init_graph(self):
        """Build the computation graph, session, saver and initializer."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(self.random_seed)
            # placeholder for single-value field.
            self.feat_index = tf.placeholder(tf.int32, shape=[None, None],
                                             name="feat_index") # None * M-1
            self.feat_value = tf.placeholder(tf.float32, shape=[None, None],
                                             name="feat_value") # None * M-1
            # placeholder for multi-value field. (movielens dataset genre field)
            self.genre_index = tf.placeholder(tf.int32, shape=[None, None],
                                              name="genre_index") # None * 6
            self.genre_value = tf.placeholder(tf.float32, shape=[None, None],
                                              name="genre_value") # None * 6
            self.label = tf.placeholder(tf.float32, shape=[None, 1], name="label") # None * 1
            # In our implementation, the shape of dropout_keep_prob is [3], used in 3 different places.
            self.dropout_keep_prob = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_prob")
            self.train_phase = tf.placeholder(tf.bool, name="train_phase")
            self.weights = self._initialize_weights()
            # model
            self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
                                                     self.feat_index) # None * M-1 * d
            feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size-1, 1])
            self.embeddings = tf.multiply(self.embeddings, feat_value) # None * M-1 * d
            # for multi-value field
            self.embeddings_m = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
                                                       self.genre_index) # None * 6 * d
            genre_value = tf.reshape(self.genre_value, shape=[-1, 6, 1])
            self.embeddings_m = tf.multiply(self.embeddings_m, genre_value)
            self.embeddings_m = tf.reduce_sum(self.embeddings_m, axis=1) # None * d
            # average the genre embeddings by the number of active genres
            self.embeddings_m = tf.div(self.embeddings_m, tf.reduce_sum(self.genre_value, axis=1, keep_dims=True)) # None * d
            #concatenate single-value field with multi-value field
            self.embeddings = tf.concat([self.embeddings, tf.expand_dims(self.embeddings_m, 1)], 1) # None * M * d
            self.embeddings = tf.nn.dropout(self.embeddings, self.dropout_keep_prob[1]) # None * M * d
            # joint training with feedforward nn
            if self.deep_layers != None:
                self.y_dense = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size])
                for i in range(0, len(self.deep_layers)):
                    self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights["layer_%d" %i]), self.weights["bias_%d"%i]) # None * layer[i]
                    if self.batch_norm:
                        self.y_dense = self.batch_norm_layer(self.y_dense, train_phase=self.train_phase, scope_bn="bn_%d" %i)
                    self.y_dense = tf.nn.relu(self.y_dense)
                    self.y_dense = tf.nn.dropout(self.y_dense, self.dropout_keep_prob[2])
                self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights["prediction_dense"]),
                                      self.weights["prediction_bias_dense"], name='logits_dense') # None * 1
            # ---------- main part of AutoInt-------------------
            self.y_deep = self.embeddings # None * M * d
            for i in range(self.blocks):
                self.y_deep = multihead_attention(queries=self.y_deep,
                                                  keys=self.y_deep,
                                                  values=self.y_deep,
                                                  num_units=self.block_shape[i],
                                                  num_heads=self.heads,
                                                  dropout_keep_prob=self.dropout_keep_prob[0],
                                                  is_training=self.train_phase,
                                                  has_residual=self.has_residual)
            self.flat = tf.reshape(self.y_deep,
                                   shape=[-1, self.output_size * self.field_size])
            self.out = tf.add(tf.matmul(self.flat, self.weights["prediction"]),
                              self.weights["prediction_bias"], name='logits') # None * 1
            if self.deep_layers != None:
                self.out += self.y_dense
            # ---------- Compute the loss ----------
            # loss
            if self.loss_type == "logloss":
                self.out = tf.nn.sigmoid(self.out, name='pred')
                self.loss = tf.losses.log_loss(self.label, self.out)
            elif self.loss_type == "mse":
                self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
            # l2 regularization on weights
            if self.l2_reg > 0:
                if self.deep_layers != None:
                    for i in range(len(self.deep_layers)):
                        self.loss += tf.contrib.layers.l2_regularizer(
                            self.l2_reg)(self.weights["layer_%d"%i])
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            # NOTE(review): no "feature_bias" variable is created in
            # _initialize_weights in this variant, so var1/var2 look
            # vestigial (copied from another model) — confirm before relying
            # on them.
            self.var1 = [v for v in tf.trainable_variables() if v.name != 'feature_bias:0']
            self.var2 = [tf.trainable_variables()[1]] # self.var2 = [feature_bias]
            if self.optimizer_type == "adam":
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                                        beta1=0.9, beta2=0.999, epsilon=1e-8).\
                    minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "adagrad":
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
                                                           initial_accumulator_value=1e-8).\
                    minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "gd":
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).\
                    minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "momentum":
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).\
                    minimize(self.loss, global_step=self.global_step)
            # init
            self.saver = tf.train.Saver(max_to_keep=5)
            init = tf.global_variables_initializer()
            self.sess = self._init_session()
            self.sess.run(init)
            self.count_param()

    def count_param(self):
        """Print the total and the non-embedding trainable parameter counts."""
        k = (np.sum([np.prod(v.get_shape().as_list())
                     for v in tf.trainable_variables()]))
        print("total parameters :%d" % k)
        print("extra parameters : %d" % (k - self.feature_size * self.embedding_size))

    def _init_session(self):
        """Create a session with soft placement and on-demand GPU memory."""
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        return tf.Session(config=config)

    def _initialize_weights(self):
        """Create all trainable variables and return them keyed by name."""
        weights = dict()
        # embeddings
        weights["feature_embeddings"] = tf.Variable(
            tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
            name="feature_embeddings") # feature_size(n) * d
        input_size = self.output_size * self.field_size
        # dense layers (only when joint-training with a DNN)
        if self.deep_layers != None:
            num_layer = len(self.deep_layers)
            layer0_size = self.field_size * self.embedding_size
            # Glorot/Xavier-style scale for each layer's normal init
            glorot = np.sqrt(2.0 / (layer0_size + self.deep_layers[0]))
            weights["layer_0"] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(layer0_size, self.deep_layers[0])), dtype=np.float32)
            weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
                                            dtype=np.float32) # 1 * layers[0]
            for i in range(1, num_layer):
                glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))
                weights["layer_%d" % i] = tf.Variable(
                    np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i-1], self.deep_layers[i])),
                    dtype=np.float32) # layers[i-1] * layers[i]
                weights["bias_%d" % i] = tf.Variable(
                    np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
                    dtype=np.float32) # 1 * layer[i]
            glorot = np.sqrt(2.0 / (self.deep_layers[-1] + 1))
            weights["prediction_dense"] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[-1], 1)),
                dtype=np.float32, name="prediction_dense")
            weights["prediction_bias_dense"] = tf.Variable(
                np.random.normal(), dtype=np.float32, name="prediction_bias_dense")
        #---------- prediction weight ------------------#
        glorot = np.sqrt(2.0 / (input_size + 1))
        weights["prediction"] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
            dtype=np.float32, name="prediction")
        weights["prediction_bias"] = tf.Variable(
            np.random.normal(), dtype=np.float32, name="prediction_bias")
        return weights

    def batch_norm_layer(self, x, train_phase, scope_bn):
        """Batch norm that switches between training and inference statistics."""
        bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                              is_training=True, reuse=None, trainable=True, scope=scope_bn)
        bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                                  is_training=False, reuse=True, trainable=True, scope=scope_bn)
        z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
        return z

    def get_batch(self, Xi, Xv, Xi_genre, Xv_genre, y, batch_size, index):
        """Return the `index`-th minibatch; labels are wrapped as [[y], ...].

        The last batch is clamped to len(y); past the end all slices are
        empty, which predict() uses as its termination condition.
        """
        start = index * batch_size
        end = (index+1) * batch_size
        end = end if end < len(y) else len(y)
        return Xi[start:end], Xv[start:end], Xi_genre[start:end], Xv_genre[start:end], [[y_] for y_ in y[start:end]]

    # shuffle five lists simultaneously (comment fixed: original said "three")
    def shuffle_in_unison_scary(self, a, b, c, d, e):
        """Shuffle five parallel arrays in place with the same permutation."""
        rng_state = np.random.get_state()
        np.random.shuffle(a)
        np.random.set_state(rng_state)
        np.random.shuffle(b)
        np.random.set_state(rng_state)
        np.random.shuffle(c)
        np.random.set_state(rng_state)
        np.random.shuffle(d)
        np.random.set_state(rng_state)
        np.random.shuffle(e)

    def fit_on_batch(self, Xi, Xv, Xi_genre, Xv_genre, y):
        """Run one optimizer step; return (global_step, batch loss)."""
        feed_dict = {self.feat_index: Xi,
                     self.feat_value: Xv,
                     self.genre_index: Xi_genre,
                     self.genre_value: Xv_genre,
                     self.label: y,
                     self.dropout_keep_prob: self.drop_keep_prob,
                     self.train_phase: True}
        step, loss, opt = self.sess.run((self.global_step, self.loss, self.optimizer), feed_dict=feed_dict)
        return step, loss

    # Since the train data is very large, they can not be fit into the memory at the same time.
    # We separate the whole train data into several files and call "fit_once" for each file.
    def fit_once(self, Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train,
                 epoch, Xi_valid=None,
                 Xv_valid=None, Xi_valid_genre=None, Xv_valid_genre=None, y_valid=None,
                 early_stopping=False):
        """Train one pass over one file of data.

        Shuffles in place, iterates full minibatches (a trailing partial
        batch is dropped), tracks metrics, checkpoints on improved
        validation loss, and returns False when early stopping triggers.
        """
        has_valid = Xv_valid is not None
        last_step = 0
        t1 = time()
        self.shuffle_in_unison_scary(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train)
        total_batch = int(len(y_train) / self.batch_size)
        for i in range(total_batch):
            Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch = self.get_batch(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train, self.batch_size, i)
            step, loss = self.fit_on_batch(Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch)
            last_step = step
        # evaluate training and validation datasets
        train_result, train_loss = self.evaluate(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train)
        self.train_result.append(train_result)
        self.train_loss.append(train_loss)
        if has_valid:
            valid_result, valid_loss = self.evaluate(Xi_valid, Xv_valid, Xi_valid_genre, Xv_valid_genre, y_valid)
            self.valid_result.append(valid_result)
            self.valid_loss.append(valid_loss)
            # checkpoint whenever validation log-loss improves
            if valid_loss < self.best_loss and self.is_save == True:
                old_loss = self.best_loss
                self.best_loss = valid_loss
                self.saver.save(self.sess, self.save_path + 'model.ckpt',global_step=last_step)
                print("[%d] model saved!. Valid loss is improved from %.4f to %.4f"
                      % (epoch, old_loss, self.best_loss))
        if self.verbose > 0 and ((epoch-1)*9) % self.verbose == 0:
            if has_valid:
                print("[%d] train-result=%.4f, train-logloss=%.4f, valid-result=%.4f, valid-logloss=%.4f [%.1f s]" % (epoch, train_result, train_loss, valid_result, valid_loss, time() - t1))
            else:
                print("[%d] train-result=%.4f [%.1f s]" \
                      % (epoch, train_result, time() - t1))
        if has_valid and early_stopping and self.training_termination(self.valid_loss):
            return False
        else:
            return True

    def training_termination(self, valid_result):
        """Return True when the last four epochs worsened monotonically."""
        if len(valid_result) > 5:
            if self.greater_is_better:
                if valid_result[-1] < valid_result[-2] and \
                    valid_result[-2] < valid_result[-3] and \
                    valid_result[-3] < valid_result[-4] and \
                    valid_result[-4] < valid_result[-5]:
                    return True
            else:
                if valid_result[-1] > valid_result[-2] and \
                    valid_result[-2] > valid_result[-3] and \
                    valid_result[-3] > valid_result[-4] and \
                    valid_result[-4] > valid_result[-5]:
                    return True
        return False

    def predict(self, Xi, Xv, Xi_genre, Xv_genre):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample
        """
        # dummy y (labels are required by the feed dict but unused for inference)
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch = self.get_batch(Xi, Xv, Xi_genre, Xv_genre, dummy_y, self.batch_size, batch_index)
        y_pred = None
        # iterate until get_batch returns an empty slice
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.genre_index: Xi_batch_genre,
                         self.genre_value: Xv_batch_genre,
                         self.label: y_batch,
                         self.dropout_keep_prob: [1.0] * len(self.drop_keep_prob),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)
            if batch_index == 0:
                y_pred = np.reshape(batch_out, (num_batch,))
            else:
                y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
            batch_index += 1
            Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch = self.get_batch(Xi, Xv, Xi_genre, Xv_genre, dummy_y, self.batch_size, batch_index)
        return y_pred

    def evaluate(self, Xi, Xv, Xi_genre, Xv_genre, y):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :param y: label of each sample in the dataset
        :return: metric of the evaluation
        """
        y_pred = self.predict(Xi, Xv, Xi_genre, Xv_genre)
        # clip to avoid log(0) in log_loss
        y_pred = np.clip(y_pred,1e-6,1-1e-6)
        return self.eval_metric(y, y_pred), log_loss(y, y_pred)

    def restore(self, save_path=None):
        """Restore the latest checkpoint (defaults to self.save_path)."""
        if (save_path == None):
            save_path = self.save_path
        ckpt = tf.train.get_checkpoint_state(save_path)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            if self.verbose > 0:
                print ("restored from %s" % (save_path))
| 22,320 | 45.502083 | 190 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/movielens/code/__init__.py | 0 | 0 | 0 | py | |
RecommenderSystems | RecommenderSystems-master/featureRec/movielens/code/train.py | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
def str2list(v):
    """Parse a comma-separated, optionally bracketed string into a list of ints.

    Used as an argparse ``type=`` converter, e.g. "[16,16]" -> [16, 16].
    """
    return [int(token.strip('[]')) for token in v.split(',')]
def str2list2(v):
    """Parse a comma-separated, optionally bracketed string into a list of floats.

    Used as an argparse ``type=`` converter, e.g. "[1,1,0.5]" -> [1.0, 1.0, 0.5].
    """
    return [float(token.strip('[]')) for token in v.split(',')]
def str2bool(v):
    """Map a yes/no-style string to a bool (argparse ``type=`` converter).

    Raises:
        argparse.ArgumentTypeError: if the string is not a recognized value.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
def parse_args():
    """Build and parse command-line hyper-parameters for AutoInt training.

    Returns:
        argparse.Namespace with all training / model options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--is_save', action='store_true')
    parser.add_argument('--greater_is_better', action='store_true', help='early stop criterion')
    parser.add_argument('--has_residual', action='store_true', help='add residual')
    parser.add_argument('--blocks', type=int, default=2, help='#blocks')
    parser.add_argument('--block_shape', type=str2list, default=[16,16], help='output shape of each block')
    parser.add_argument('--heads', type=int, default=2, help='#heads')
    parser.add_argument('--embedding_size', type=int, default=16)
    # three keep-probs: [attention weights, embeddings, dense layers]
    parser.add_argument('--dropout_keep_prob', type=str2list2, default=[1, 1, 0.5])
    parser.add_argument('--epoch', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--optimizer_type', type=str, default='adam')
    parser.add_argument('--l2_reg', type=float, default=0.0)
    parser.add_argument('--random_seed', type=int, default=2018)
    parser.add_argument('--save_path', type=str, default='./model/')
    parser.add_argument('--field_size', type=int, default=23, help='#fields')
    parser.add_argument('--loss_type', type=str, default='logloss')
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--run_times', type=int, default=3,help='run multiple times to eliminate error')
    parser.add_argument('--deep_layers', type=str2list, default=None, help='config for dnn in joint train')
    parser.add_argument('--batch_norm', type=int, default=0)
    parser.add_argument('--batch_norm_decay', type=float, default=0.995)
    #parser.add_argument('--data', type=str, help='data name')
    parser.add_argument('--data_path', type=str, help='root path for all the data')
    return parser.parse_args()
def _run_(args, run_cnt):
    """Train one AutoInt model on the preprocessed MovieLens arrays.

    Loads the .npy splits produced by preprocess.py from args.data_path,
    trains with early stopping, restores the best checkpoint, and returns
    (test AUC, test log-loss).
    """
    path_prefix = args.data_path
    #feature_size = np.load(path_prefix + '/feature_size.npy')[0]
    # hard-coded feature-id vocabulary size for this dataset
    feature_size = 3600
    # test: file1, valid: file2, train: file3-10
    model = AutoInt(args=args, feature_size=feature_size, run_cnt=run_cnt)
    Xi_valid = np.load(path_prefix + '/valid_i_other.npy')
    Xv_valid = np.load(path_prefix + '/valid_x_other.npy')
    Xi_valid_genre = np.load(path_prefix + '/valid_i_genre.npy')
    Xv_valid_genre = np.load(path_prefix + '/valid_x_genre.npy')
    y_valid = np.load(path_prefix + '/valid_y.npy')
    is_continue = True
    for k in range(model.epoch):
        if not is_continue:
            print('early stopping at epoch %d' % (k+1))
            break
        time_epoch = 0
        # single data file here; the inner loop mirrors the multi-file setup
        for j in range(1):
            if not is_continue:
                print('early stopping at epoch %d' % (k+1))
                break
            Xi_train = np.load(path_prefix + '/train_i_other.npy')
            Xv_train = np.load(path_prefix + '/train_x_other.npy')
            Xi_train_genre = np.load(path_prefix + '/train_i_genre.npy')
            Xv_train_genre = np.load(path_prefix + '/train_x_genre.npy')
            y_train = np.load(path_prefix + '/train_y.npy')
            t1 = time()
            is_continue = model.fit_once(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train, k+1,
                                         Xi_valid, Xv_valid, Xi_valid_genre, Xv_valid_genre, y_valid, early_stopping=True)
            time_epoch += time() - t1
        print("epoch %d, time %d" % (k+1, time_epoch))
    print('start testing!...')
    Xi_test = np.load(path_prefix + '/test_i_other.npy')
    Xv_test = np.load(path_prefix + '/test_x_other.npy')
    Xi_test_genre = np.load(path_prefix + '/test_i_genre.npy')
    Xv_test_genre = np.load(path_prefix + '/test_x_genre.npy')
    y_test = np.load(path_prefix + '/test_y.npy')
    # restore the best (lowest valid-loss) checkpoint before evaluating
    model.restore()
    test_result, test_loss = model.evaluate(Xi_test, Xv_test, Xi_test_genre, Xv_test_genre, y_test)
    print("test-result = %.4lf, test-logloss = %.4lf" % (test_result, test_loss))
    return test_result, test_loss
if __name__ == "__main__":
    # Run training `run_times` times and report average AUC / log-loss to
    # smooth out random-initialization variance.
    args = parse_args()
    print(args.__dict__)
    print('**************')
    test_auc = []
    test_log = []
    print('run time : %d' % args.run_times)
    for i in range(1, args.run_times + 1):
        test_result, test_loss = _run_(args, i)
        test_auc.append(test_result)
        test_log.append(test_loss)
    print('test_auc', test_auc)
    print('test_log_loss', test_log)
    print('avg_auc', sum(test_auc)/len(test_auc))
    print('avg_log_loss', sum(test_log)/len(test_log))
| 5,236 | 37.792593 | 107 | py |
# Preprocess MovieLens-1M into sparse feature-id / feature-value arrays and
# save 80/10/10 train/valid/test splits as .npy files for AutoInt training.
# Feature ids are assigned sequentially: gender, age, occupation, zipcode,
# genre, release-year bucket, and finally a scaled timestamp field.
# NOTE(review): the name `dict` shadows the builtin dict for the whole script.
dict = {}
user_count = 6040
# gender field -> feature ids 1..2
gender = {}
gender['M'] = 1
gender['F'] = 2
dict[1] = "Gender-male"
dict[2] = "Gender-female"
# age buckets (MovieLens codes) -> feature ids 3..9
age = {}
age['1'] = 3
age['18'] = 4
age['25'] = 5
age['35'] = 6
age['45'] = 7
age['50'] = 8
age['56'] = 9
dict[3] = "Age-under 18"
dict[4] = "Age-18-24"
dict[5] = "Age-25-34"
dict[6] = "Age-35-44"
dict[7] = "Age-45-49"
dict[8] = "Age-50-55"
dict[9] = "Age-56+"
feature_size = 9
# occupation codes 0..20 -> feature ids 10..30
occ = {}
for i in range(21):
    feature_size += 1
    occ[str(i)] = feature_size
dict[10] = "Occupation-other"
dict[11] = "Occupation-academic/educator"
dict[12] = "Occupation-artist"
dict[13] = "Occupation-clerical/admin"
dict[14] = "Occupation-college/grad student"
dict[15] = "Occupation-customer service"
dict[16] = "Occupation-doctor/health care"
dict[17] = "Occupation-executive/managerial"
dict[18] = "Occupation-farmer"
dict[19] = "Occupation-homemaker"
dict[20] = "Occupation-K-12 student"
dict[21] = "Occupation-lawyer"
dict[22] = "Occupation-programmer"
dict[23] = "Occupation-retired"
dict[24] = "Occupation-sales/marketing"
dict[25] = "Occupation-scientist"
dict[26] = "Occupation-self-employed"
dict[27] = "Occupation-technician/engineer"
dict[28] = "Occupation-tradesman/craftsman"
dict[29] = "Occupation-unemployed"
dict[30] = "Occupation-writer"
# first pass over users.dat: assign a feature id to each distinct zipcode
f = open('users.dat', 'r')
zipcode = {}
for i in range(1, user_count + 1):
    line = f.readline()
    line = line[:-1]
    l = line.split('::')
    if zipcode.get(l[-1]) == None:
        feature_size += 1
        zipcode[l[-1]] = feature_size
        dict[feature_size] = "Zipcode-" + str(l[-1])
f.close()
# second pass: per-user index/value lists; index 0 is a dummy so that
# user_i / user_v can be addressed directly by 1-based UserID
f = open('users.dat', 'r')
user_i = [[]]
user_v = [[]]
for i in range(1, user_count + 1):
    line = f.readline()
    line = line[:-1]
    l = line.split('::')
    user_i.append([gender[l[1]], age[l[2]], occ[l[3]], zipcode[l[4]]])
    user_v.append([1, 1, 1, 1])
f.close()
print("The number of user's feature is:", len(user_i))
movie_count = 3883
max_gen = 0
min_gen = 10
# release-year buckets: one bucket per decade before 1970, one per year after
year_dict = {}
for i in range(1919, 1930):
    year_dict[i] = 1
for i in range(1930, 1940):
    year_dict[i] = 2
for i in range(1940, 1950):
    year_dict[i] = 3
for i in range(1950, 1960):
    year_dict[i] = 4
for i in range(1960, 1970):
    year_dict[i] = 5
for i in range(1970, 2001):
    year_dict[i] = 6 + i - 1970
# first pass over movies.dat: assign a feature id to each distinct genre
f = open('movies.dat', 'r', encoding="ISO-8859-1")
genres = {}
for i in range(1, movie_count + 1):
    line = f.readline()
    line = line[:-1]
    l = line.split('::')
    s = l[-1]
    l = s.split('|')
    if len(l) > max_gen:
        max_gen = len(l)
    if len(l) < min_gen:
        min_gen = len(l)
    if len(l) == 0:
        print('error')
    for _ in l:
        if genres.get(_) == None:
            feature_size += 1
            genres[_] = feature_size
            dict[feature_size] = "Genre-" + _
f.close()
print("2222", feature_size)
print(len(dict))
print('The max number is :', max_gen)
#feature_size += 1 # for year of release
# second pass: per-movie index/value lists; the genre field is padded with
# zero-valued "Genre-NULL" ids up to 6 slots, then the release-year id added
f = open('movies.dat', 'r', encoding="ISO-8859-1")
movie_i = {}
movie_v = {}
for i in range(1, movie_count + 1):
    line = f.readline()
    line = line[:-1]
    l = line.split('::')
    MovieID = int(l[0])
    Year = int(l[1][-5:-1])
    l = l[-1].split('|')
    new_i = []
    new_v = []
    for _ in l:
        new_i.append(genres[_])
        new_v.append(1)
    t = 6 - len(l) # 0 ~ 5 remain
    for _ in range(feature_size + 1, feature_size + t + 1):
        new_i.append(_)
        new_v.append(0)
    #new_i.append(feature_size + 6)
    #new_v.append(Year)
    new_i.append(feature_size + 5 + year_dict[Year])
    new_v.append(1)
    movie_i[MovieID] = new_i
    movie_v[MovieID] = new_v
f.close()
print(feature_size + 1, feature_size + 5)
#feature_size += 6
# reserve 5 padding ids for the genre field, then the release-year buckets
dict[feature_size + 1] = "Genre-NULL"
dict[feature_size + 2] = "Genre-NULL"
dict[feature_size + 3] = "Genre-NULL"
dict[feature_size + 4] = "Genre-NULL"
dict[feature_size + 5] = "Genre-NULL"
feature_size += 5
feature_size += 1
dict[feature_size] = "Release-1919-1929"
feature_size += 1
dict[feature_size] = "Release-1930-1939"
feature_size += 1
dict[feature_size] = "Release-1940-1949"
feature_size += 1
dict[feature_size] = "Release-1950-1959"
feature_size += 1
dict[feature_size] = "Release-1960-1969"
for y in range(1970, 2001):
    feature_size += 1
    dict[feature_size] = "Release-" + str(y)
print("####: ", feature_size)
print(len(dict))
print("The number of movie's feature is:", len(movie_i))
feature_size += 1 # for timestamp
dict[feature_size] = "Timestamp"
# build one sample per rating: user features + movie features + timestamp;
# ratings > 3 are positive, < 3 negative, == 3 dropped
f = open('ratings.dat', 'r')
data_i = []
data_v = []
Y = []
#U = []
#I = []
all_count = 1000209
ratings_count = 0
for i in range(1, all_count + 1):
    line = f.readline()
    line = line[:-1]
    l = line.split('::')
    y = int(l[2])
    new_i = user_i[int(l[0])].copy()
    new_v = user_v[int(l[0])].copy()
    new_i.extend(movie_i[int(l[1])])
    new_v.extend(movie_v[int(l[1])])
    new_i.append(feature_size)
    new_v.append(int(l[3]))
    if y > 3:
        y = 1
    elif y < 3:
        y = 0
    else:
        y = -1
    if y != -1:
        data_i.append(new_i)
        data_v.append(new_v)
        # U.append(int(l[0]))
        # I.append(int(l[1]))
        Y.append(y)
        ratings_count += 1
f.close()
print('valid number of ratings:', len(data_v))
print('Positive number =', sum(Y))
print(feature_size)
print("Dict: ", len(dict))
print('All =', len(data_i))
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
# min-max scale the raw timestamp (last value of every sample) into [0, 1]
scaler = MinMaxScaler()
R = []
for i in range(ratings_count):
    R.append([data_v[i][-1]])
#print(R)
#print(np.max(R))
#print(np.min(R))
R = scaler.fit_transform(R)
#print(R)
for i in range(ratings_count):
    data_v[i].pop()
    data_v[i].append(R[i][0])
    # data_v[i].append(U[i])
    # data_v[i].append(I[i])
print(data_v[0])
# shuffle sample order with a fixed seed, then split 80/10/10
perm = []
for i in range(ratings_count):
    perm.append(i)
random.seed(2019)
random.shuffle(perm)
train_count = int(ratings_count * 0.8)
valid_count = int(ratings_count * 0.9)
# Columns 0:4 are the single-value user fields, 4:10 the 6-slot genre
# field, 10: the remaining movie fields; genre is saved separately.
X_i_tr = []
X_v_tr = []
Y_tr = []
for i in range(train_count):
    X_i_tr.append(data_i[perm[i]])
    X_v_tr.append(data_v[perm[i]])
    Y_tr.append(Y[perm[i]])
X_i_tr = np.array(X_i_tr)
X_v_tr = np.array(X_v_tr)
Y_tr = np.array(Y_tr)
i1 = X_i_tr[:, 0:4]
i2 = X_i_tr[:, 4:10]
i3 = X_i_tr[:, 10:]
x1 = X_v_tr[:, 0:4]
x2 = X_v_tr[:, 4:10]
x3 = X_v_tr[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("train_i_genre.npy", i2)
np.save("train_i_other.npy", i4)
np.save("train_x_genre.npy", x2)
np.save("train_x_other.npy", x4)
np.save("train_y.npy", np.array(Y_tr))
#np.save("train_ui.npy", np.array(ui_tr))
X_i_va = []
X_v_va = []
Y_va = []
for i in range(train_count, valid_count):
    X_i_va.append(data_i[perm[i]])
    X_v_va.append(data_v[perm[i]])
    Y_va.append(Y[perm[i]])
    # ui_va.append([U[perm[i]], I[perm[i]])
X_i_va = np.array(X_i_va)
X_v_va = np.array(X_v_va)
Y_va = np.array(Y_va)
i1 = X_i_va[:, 0:4]
i2 = X_i_va[:, 4:10]
i3 = X_i_va[:, 10:]
x1 = X_v_va[:, 0:4]
x2 = X_v_va[:, 4:10]
x3 = X_v_va[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("valid_i_genre.npy", i2)
np.save("valid_i_other.npy", i4)
np.save("valid_x_genre.npy", x2)
np.save("valid_x_other.npy", x4)
np.save("valid_y.npy", np.array(Y_va))
X_i_te = []
X_v_te = []
Y_te = []
for i in range(valid_count, ratings_count):
    X_i_te.append(data_i[perm[i]])
    X_v_te.append(data_v[perm[i]])
    Y_te.append(Y[perm[i]])
    # ui_te.append(U[perm[i]]], I[perm[i]])
X_i_te = np.array(X_i_te)
X_v_te = np.array(X_v_te)
Y_te = np.array(Y_te)
i1 = X_i_te[:, 0:4]
i2 = X_i_te[:, 4:10]
i3 = X_i_te[:, 10:]
x1 = X_v_te[:, 0:4]
x2 = X_v_te[:, 4:10]
x3 = X_v_te[:, 10:]
i4 = np.concatenate((i1,i3), axis=1)
x4 = np.concatenate((x1,x3), axis=1)
np.save("test_i_genre.npy", i2)
np.save("test_i_other.npy", i4)
np.save("test_x_genre.npy", x2)
np.save("test_x_other.npy", x4)
np.save("test_y.npy", np.array(Y_te))
print(len(X_i_tr))
print(len(X_i_va))
print(len(X_i_te))
print(len(Y))
# dump the id -> human-readable-name mapping
# NOTE(review): this file handle is never closed/flushed explicitly
f = open("feature.txt", 'w')
f.write(str(dict))
| 7,665 | 20.840456 | 67 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/autoint/model.py | '''
Tensorflow implementation of AutoInt described in:
AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks.
author: Chence Shi
email: chenceshi@pku.edu.cn
'''
import os
import numpy as np
import tensorflow as tf
from time import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score, log_loss
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
'''
The following two functions are adapted from kyubyong park's implementation of transformer
We slightly modify the code to make it suitable for our work.(add relu, delete key masking and causality mask)
June 2017 by kyubyong park.
kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/transformer
'''
def normalize(inputs, epsilon=1e-8):
    """Apply layer normalization over the last dimension.

    Args:
        inputs: tensor with 2 or more dimensions.
        epsilon: small constant guarding against division by zero.

    Returns:
        A tensor with the same shape and dtype as ``inputs``.
    """
    feature_shape = inputs.get_shape()[-1:]
    mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
    # Learnable shift/scale (created in the same order as the original:
    # zeros-initialized shift first, ones-initialized scale second).
    shift = tf.Variable(tf.zeros(feature_shape))
    scale = tf.Variable(tf.ones(feature_shape))
    std = (variance + epsilon) ** 0.5
    return scale * ((inputs - mean) / std) + shift
def multihead_attention(queries,
                        keys,
                        values,
                        num_units=None,
                        num_heads=1,
                        dropout_keep_prob=1,
                        is_training=True,
                        has_residual=True):
    """Multi-head attention with ReLU-activated linear projections.

    Adapted from Kyubyong Park's transformer implementation; key masking
    and the causality mask are removed and ReLU activations are added.

    Args:
        queries: tensor of shape (N, T_q, C_q).
        keys: tensor of shape (N, T_k, C_k).
        values: tensor of shape (N, T_k, C_k).
        num_units: attention size; defaults to the last dim of ``queries``.
            Must be divisible by ``num_heads``.
        num_heads: number of attention heads.
        dropout_keep_prob: keep probability applied to attention weights.
        is_training: bool (or bool tensor) enabling dropout.
        has_residual: when True, add a projected residual connection.

    Returns:
        Tensor of shape (N, T_q, num_units).
    """
    if num_units is None:
        # BUG FIX: `as_list` is a method. The original `as_list[-1]` indexed
        # the bound-method object and raised a TypeError at graph-build time
        # whenever num_units was omitted.
        num_units = queries.get_shape().as_list()[-1]
    # Linear projections (ReLU-activated, per the AutoInt variant)
    Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
    K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
    V = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    if has_residual:
        V_res = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    # Split heads along the channel dim and stack them on the batch dim:
    # (h*N, T, C/h)
    Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)
    K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)
    V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)
    # Scaled dot-product attention
    weights = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))
    weights = weights / (K_.get_shape().as_list()[-1] ** 0.5)
    weights = tf.nn.softmax(weights)
    # Dropout on the attention weights (only active while training)
    weights = tf.layers.dropout(weights, rate=1-dropout_keep_prob,
                                training=tf.convert_to_tensor(is_training))
    # Weighted sum
    outputs = tf.matmul(weights, V_)
    # Restore shape: merge heads back onto the channel dim -> (N, T_q, C)
    outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)
    # Residual connection
    if has_residual:
        outputs += V_res
    outputs = tf.nn.relu(outputs)
    # Layer-normalize the output
    outputs = normalize(outputs)
    return outputs
class AutoInt():
    """AutoInt CTR model (TF1 graph mode).

    Embeds each feature field, stacks multi-head self-attention blocks over
    the field embeddings, and predicts a click probability with a linear
    head; optionally joint-trains a plain feed-forward DNN branch whose
    logit is added to the attention logit.
    """
    def __init__(self, args, feature_size, run_cnt):
        """Store hyper-parameters from `args` and build the TF graph.

        :param args: parsed command-line namespace (see parse_args in train.py)
        :param feature_size: total number of distinct feature indices (n)
        :param run_cnt: run counter; used to separate checkpoint directories
        """
        self.feature_size = feature_size  # denote as n, dimension of concatenated features
        self.field_size = args.field_size  # denote as M, number of total feature fields
        self.embedding_size = args.embedding_size  # denote as d, size of the feature embedding
        self.blocks = args.blocks  # number of the attention blocks
        self.heads = args.heads  # number of attention heads per block
        self.block_shape = args.block_shape  # output width of each block
        self.output_size = args.block_shape[-1]
        self.has_residual = args.has_residual
        self.deep_layers = args.deep_layers  # None, or layer sizes for joint train with a DNN branch
        self.batch_norm = args.batch_norm
        self.batch_norm_decay = args.batch_norm_decay
        self.drop_keep_prob = args.dropout_keep_prob  # [attention, embedding, dnn] keep probs
        self.l2_reg = args.l2_reg
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.learning_rate = args.learning_rate
        self.optimizer_type = args.optimizer_type
        self.save_path = args.save_path + str(run_cnt) + '/'
        self.is_save = args.is_save
        if (args.is_save == True and os.path.exists(self.save_path) == False):
            os.makedirs(self.save_path)
        self.verbose = args.verbose
        self.random_seed = args.random_seed
        self.loss_type = args.loss_type
        self.eval_metric = roc_auc_score
        # best validation loss seen so far; 1.0 is an upper bound for logloss checkpoints
        self.best_loss = 1.0
        self.greater_is_better = args.greater_is_better
        self.train_result, self.valid_result = [], []
        self.train_loss, self.valid_loss = [], []
        self._init_graph()
    def _init_graph(self):
        """Build the TF1 graph: placeholders, embeddings, attention blocks,
        optional DNN branch, loss, and optimizer; then start a session."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(self.random_seed)
            self.feat_index = tf.placeholder(tf.int32, shape=[None, None],
                                             name="feat_index")  # None * M
            self.feat_value = tf.placeholder(tf.float32, shape=[None, None],
                                             name="feat_value")  # None * M
            self.label = tf.placeholder(tf.float32, shape=[None, 1], name="label")  # None * 1
            # In our implementation, the shape of dropout_keep_prob is [3], used in 3 different places.
            self.dropout_keep_prob = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_prob")
            self.train_phase = tf.placeholder(tf.bool, name="train_phase")
            self.weights = self._initialize_weights()
            # model: look up field embeddings, then scale them by the raw feature values
            self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
                                                     self.feat_index)  # None * M * d
            feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
            self.embeddings = tf.multiply(self.embeddings, feat_value)  # None * M * d
            self.embeddings = tf.nn.dropout(self.embeddings, self.dropout_keep_prob[1])  # None * M * d
            # joint training with feedforward nn
            if self.deep_layers != None:
                self.y_dense = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size])
                for i in range(0, len(self.deep_layers)):
                    self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights["layer_%d" %i]), self.weights["bias_%d"%i])  # None * layer[i]
                    if self.batch_norm:
                        self.y_dense = self.batch_norm_layer(self.y_dense, train_phase=self.train_phase, scope_bn="bn_%d" %i)
                    self.y_dense = tf.nn.relu(self.y_dense)
                    self.y_dense = tf.nn.dropout(self.y_dense, self.dropout_keep_prob[2])
                self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights["prediction_dense"]),
                                      self.weights["prediction_bias_dense"], name='logits_dense')  # None * 1
            # ---------- main part of AutoInt: stacked self-attention blocks ----------
            self.y_deep = self.embeddings  # None * M * d
            for i in range(self.blocks):
                self.y_deep = multihead_attention(queries=self.y_deep,
                                                  keys=self.y_deep,
                                                  values=self.y_deep,
                                                  num_units=self.block_shape[i],
                                                  num_heads=self.heads,
                                                  dropout_keep_prob=self.dropout_keep_prob[0],
                                                  is_training=self.train_phase,
                                                  has_residual=self.has_residual)
            self.flat = tf.reshape(self.y_deep,
                                   shape=[-1, self.output_size * self.field_size])
            self.out = tf.add(tf.matmul(self.flat, self.weights["prediction"]),
                              self.weights["prediction_bias"], name='logits')  # None * 1
            if self.deep_layers != None:
                self.out += self.y_dense
            # ---------- Compute the loss ----------
            # loss
            if self.loss_type == "logloss":
                self.out = tf.nn.sigmoid(self.out, name='pred')
                self.loss = tf.losses.log_loss(self.label, self.out)
            elif self.loss_type == "mse":
                self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
            # l2 regularization on weights (DNN layers only)
            if self.l2_reg > 0:
                if self.deep_layers != None:
                    for i in range(len(self.deep_layers)):
                        self.loss += tf.contrib.layers.l2_regularizer(
                            self.l2_reg)(self.weights["layer_%d"%i])
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            self.var1 = [v for v in tf.trainable_variables() if v.name != 'feature_bias:0']
            self.var2 = [tf.trainable_variables()[1]]  # self.var2 = [feature_bias]
            if self.optimizer_type == "adam":
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                                        beta1=0.9, beta2=0.999, epsilon=1e-8).\
                                                        minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "adagrad":
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
                                                           initial_accumulator_value=1e-8).\
                                                           minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "gd":
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).\
                    minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "momentum":
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).\
                    minimize(self.loss, global_step=self.global_step)
            # init
            self.saver = tf.train.Saver(max_to_keep=5)
            init = tf.global_variables_initializer()
            self.sess = self._init_session()
            self.sess.run(init)
            self.count_param()
    def count_param(self):
        """Print total trainable parameters and the count excluding the embedding table."""
        k = (np.sum([np.prod(v.get_shape().as_list())
                     for v in tf.trainable_variables()]))
        print("total parameters :%d" % k)
        print("extra parameters : %d" % (k - self.feature_size * self.embedding_size))
    def _init_session(self):
        """Create a Session that grows GPU memory on demand."""
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        return tf.Session(config=config)
    def _initialize_weights(self):
        """Create all trainable variables: embedding table, optional DNN layers
        (Glorot-scaled normal init), and the final linear prediction head."""
        weights = dict()
        # embeddings
        weights["feature_embeddings"] = tf.Variable(
            tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
            name="feature_embeddings")  # feature_size(n) * d
        input_size = self.output_size * self.field_size
        # dense layers
        if self.deep_layers != None:
            num_layer = len(self.deep_layers)
            layer0_size = self.field_size * self.embedding_size
            glorot = np.sqrt(2.0 / (layer0_size + self.deep_layers[0]))
            weights["layer_0"] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(layer0_size, self.deep_layers[0])), dtype=np.float32)
            weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
                                            dtype=np.float32)  # 1 * layers[0]
            for i in range(1, num_layer):
                glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))
                weights["layer_%d" % i] = tf.Variable(
                    np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i-1], self.deep_layers[i])),
                    dtype=np.float32)  # layers[i-1] * layers[i]
                weights["bias_%d" % i] = tf.Variable(
                    np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
                    dtype=np.float32)  # 1 * layer[i]
            glorot = np.sqrt(2.0 / (self.deep_layers[-1] + 1))
            weights["prediction_dense"] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[-1], 1)),
                dtype=np.float32, name="prediction_dense")
            weights["prediction_bias_dense"] = tf.Variable(
                np.random.normal(), dtype=np.float32, name="prediction_bias_dense")
        #---------- prediction weight ------------------#
        glorot = np.sqrt(2.0 / (input_size + 1))
        weights["prediction"] = tf.Variable(
            np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
            dtype=np.float32, name="prediction")
        weights["prediction_bias"] = tf.Variable(
            np.random.normal(), dtype=np.float32, name="prediction_bias")
        return weights
    def batch_norm_layer(self, x, train_phase, scope_bn):
        """Batch norm with separate train/inference branches selected by `train_phase`."""
        bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                              is_training=True, reuse=None, trainable=True, scope=scope_bn)
        bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                                  is_training=False, reuse=True, trainable=True, scope=scope_bn)
        z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
        return z
    def get_batch(self, Xi, Xv, y, batch_size, index):
        """Return the `index`-th mini-batch of (indices, values, labels);
        labels are wrapped in single-element lists to match the None*1 placeholder."""
        start = index * batch_size
        end = (index+1) * batch_size
        end = end if end < len(y) else len(y)
        return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]
    # shuffle three lists simultaneously (in place, same permutation via shared RNG state)
    def shuffle_in_unison_scary(self, a, b, c):
        rng_state = np.random.get_state()
        np.random.shuffle(a)
        np.random.set_state(rng_state)
        np.random.shuffle(b)
        np.random.set_state(rng_state)
        np.random.shuffle(c)
    def fit_on_batch(self, Xi, Xv, y):
        """Run one optimizer step on a single mini-batch; return (global_step, loss)."""
        feed_dict = {self.feat_index: Xi,
                     self.feat_value: Xv,
                     self.label: y,
                     self.dropout_keep_prob: self.drop_keep_prob,
                     self.train_phase: True}
        step, loss, opt = self.sess.run((self.global_step, self.loss, self.optimizer), feed_dict=feed_dict)
        return step, loss
    # Since the train data is very large, they can not be fit into the memory at the same time.
    # We separate the whole train data into several files and call "fit_once" for each file.
    def fit_once(self, Xi_train, Xv_train, y_train,
                 epoch, file_count, Xi_valid=None,
                 Xv_valid=None, y_valid=None,
                 early_stopping=False):
        """Train one pass over a single data file, evaluate, checkpoint on
        validation-loss improvement, and return False when early stopping fires."""
        has_valid = Xv_valid is not None
        last_step = 0
        t1 = time()
        self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)
        total_batch = int(len(y_train) / self.batch_size)
        for i in range(total_batch):
            Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)
            step, loss = self.fit_on_batch(Xi_batch, Xv_batch, y_batch)
            last_step = step
        # evaluate training and validation datasets
        train_result, train_loss = self.evaluate(Xi_train, Xv_train, y_train)
        self.train_result.append(train_result)
        self.train_loss.append(train_loss)
        if has_valid:
            valid_result, valid_loss = self.evaluate(Xi_valid, Xv_valid, y_valid)
            self.valid_result.append(valid_result)
            self.valid_loss.append(valid_loss)
            if valid_loss < self.best_loss and self.is_save == True:
                old_loss = self.best_loss
                self.best_loss = valid_loss
                self.saver.save(self.sess, self.save_path + 'model.ckpt',global_step=last_step)
                print("[%d-%d] model saved!. Valid loss is improved from %.4f to %.4f"
                      % (epoch, file_count, old_loss, self.best_loss))
        # NOTE(review): the *9 assumes 9 training files per epoch -- confirm against the caller
        if self.verbose > 0 and ((epoch-1)*9 + file_count) % self.verbose == 0:
            if has_valid:
                print("[%d-%d] train-result=%.4f, train-logloss=%.4f, valid-result=%.4f, valid-logloss=%.4f [%.1f s]" % (epoch, file_count, train_result, train_loss, valid_result, valid_loss, time() - t1))
            else:
                print("[%d-%d] train-result=%.4f [%.1f s]" \
                      % (epoch, file_count, train_result, time() - t1))
        if has_valid and early_stopping and self.training_termination(self.valid_loss):
            return False
        else:
            return True
    def training_termination(self, valid_result):
        """Return True when the validation series moved in the wrong direction
        for four consecutive checks (monotone worsening over the last 5 values)."""
        if len(valid_result) > 5:
            if self.greater_is_better:
                if valid_result[-1] < valid_result[-2] and \
                    valid_result[-2] < valid_result[-3] and \
                    valid_result[-3] < valid_result[-4] and \
                    valid_result[-4] < valid_result[-5]:
                    return True
            else:
                if valid_result[-1] > valid_result[-2] and \
                    valid_result[-2] > valid_result[-3] and \
                    valid_result[-3] > valid_result[-4] and \
                    valid_result[-4] > valid_result[-5]:
                    return True
        return False
    def predict(self, Xi, Xv):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample
        """
        # dummy y (never used for prediction; only feeds the label placeholder)
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        y_pred = None
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            # dropout disabled at inference: all keep probabilities set to 1.0
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.label: y_batch,
                         self.dropout_keep_prob: [1.0] * len(self.drop_keep_prob),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)
            if batch_index == 0:
                y_pred = np.reshape(batch_out, (num_batch,))
            else:
                y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
            batch_index += 1
            Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
        return y_pred
    def evaluate(self, Xi, Xv, y):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :param y: label of each sample in the dataset
        :return: (eval metric, log loss) of the evaluation
        """
        y_pred = self.predict(Xi, Xv)
        # clip to avoid log(0) in log_loss
        y_pred = np.clip(y_pred,1e-6,1-1e-6)
        return self.eval_metric(y, y_pred), log_loss(y, y_pred)
    def restore(self, save_path=None):
        """Restore the latest checkpoint from `save_path` (defaults to self.save_path)."""
        if (save_path == None):
            save_path = self.save_path
        ckpt = tf.train.get_checkpoint_state(save_path)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            if self.verbose > 0:
                print ("restored from %s" % (save_path))
| 20,281 | 43.772627 | 205 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/autoint/train.py | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
def str2list(v):
    """Parse a bracketed comma string such as "[16,16]" into a list of ints."""
    return [int(token.strip('[]')) for token in v.split(',')]
def str2list2(v):
    """Parse a bracketed comma string such as "[1,1,0.5]" into a list of floats."""
    tokens = v.split(',')
    return [float(token.strip('[]')) for token in tokens]
def str2bool(v):
    """Map a yes/no-style string (case-insensitive) to a bool; raise otherwise."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
def parse_args():
    """Build and parse the command-line arguments for an AutoInt run."""
    ap = argparse.ArgumentParser()
    # checkpointing / early stopping
    ap.add_argument('--is_save', action='store_true')
    ap.add_argument('--greater_is_better', action='store_true', help='early stop criterion')
    # attention architecture
    ap.add_argument('--has_residual', action='store_true', help='add residual')
    ap.add_argument('--blocks', type=int, default=2, help='#blocks')
    ap.add_argument('--block_shape', type=str2list, default=[16, 16], help='output shape of each block')
    ap.add_argument('--heads', type=int, default=2, help='#heads')
    ap.add_argument('--embedding_size', type=int, default=16)
    ap.add_argument('--dropout_keep_prob', type=str2list2, default=[1, 1, 0.5])
    # optimization
    ap.add_argument('--epoch', type=int, default=2)
    ap.add_argument('--batch_size', type=int, default=1024)
    ap.add_argument('--learning_rate', type=float, default=0.001)
    ap.add_argument('--optimizer_type', type=str, default='adam')
    ap.add_argument('--l2_reg', type=float, default=0.0)
    ap.add_argument('--random_seed', type=int, default=2018)
    ap.add_argument('--save_path', type=str, default='./model/')
    ap.add_argument('--field_size', type=int, default=23, help='#fields')
    ap.add_argument('--loss_type', type=str, default='logloss')
    ap.add_argument('--verbose', type=int, default=1)
    ap.add_argument('--run_times', type=int, default=3, help='run multiple times to eliminate error')
    # optional joint DNN branch
    ap.add_argument('--deep_layers', type=str2list, default=None, help='config for dnn in joint train')
    ap.add_argument('--batch_norm', type=int, default=0)
    ap.add_argument('--batch_norm_decay', type=float, default=0.995)
    # data location
    ap.add_argument('--data', type=str, help='data name')
    ap.add_argument('--data_path', type=str, help='root path for all the data')
    return ap.parse_args()
def _run_(args, file_name, run_cnt):
    """Train AutoInt on parts 3-10, validate on part 2, and report test metrics on part 1."""
    root = os.path.join(args.data_path, args.data)
    feature_size = np.load(root + '/feature_size.npy')[0]
    # test: file1, valid: file2, train: file3-10
    model = AutoInt(args=args, feature_size=feature_size, run_cnt=run_cnt)
    def _load_part(part):
        # each part directory holds index / value / label arrays named by file_name
        return [np.load(root + '/part' + str(part) + '/' + name) for name in file_name]
    Xi_valid, Xv_valid, y_valid = _load_part(2)
    is_continue = True
    for epoch in range(model.epoch):
        if not is_continue:
            print('early stopping at epoch %d' % (epoch+1))
            break
        file_count = 0
        time_epoch = 0
        for part in range(3, 11):
            if not is_continue:
                print('early stopping at epoch %d file %d' % (epoch+1, part))
                break
            file_count += 1
            Xi_train, Xv_train, y_train = _load_part(part)
            print("epoch %d, file %d" %(epoch+1, part))
            start = time()
            is_continue = model.fit_once(Xi_train, Xv_train, y_train, epoch+1, file_count,
                                         Xi_valid, Xv_valid, y_valid, early_stopping=True)
            time_epoch += time() - start
        print("epoch %d, time %d" % (epoch+1, time_epoch))
    print('start testing!...')
    Xi_test, Xv_test, y_test = _load_part(1)
    model.restore()
    test_result, test_loss = model.evaluate(Xi_test, Xv_test, y_test)
    print("test-result = %.4lf, test-logloss = %.4lf" % (test_result, test_loss))
    return test_result, test_loss
if __name__ == "__main__":
    args = parse_args()
    print(args.__dict__)
    print('**************')
    if args.data in ['Avazu']:
        # Avazu does not have numerical features so we didn't scale the data.
        file_name = ['train_i.npy', 'train_x.npy', 'train_y.npy']
    elif args.data in ['Criteo', 'KDD2012']:
        file_name = ['train_i.npy', 'train_x2.npy', 'train_y.npy']
    else:
        # fail fast: the original fell through to a NameError on file_name below
        raise ValueError('unknown dataset: %s (expected Avazu, Criteo or KDD2012)' % args.data)
    test_auc = []
    test_log = []
    print('run time : %d' % args.run_times)
    # run several times and average to smooth out random-seed variance
    for i in range(1, args.run_times + 1):
        test_result, test_loss = _run_(args, file_name, i)
        test_auc.append(test_result)
        test_log.append(test_loss)
    print('test_auc', test_auc)
    print('test_log_loss', test_log)
    print('avg_auc', sum(test_auc)/len(test_auc))
    print('avg_log_loss', sum(test_log)/len(test_log))
| 5,264 | 37.713235 | 107 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/Kfold_split/config.py | DATA_PATH = './Criteo/'
# Paths to the whole-dataset feature files produced by preprocess.py.
TRAIN_I = DATA_PATH + 'train_i.txt'  # per-sample feature indices
TRAIN_X = DATA_PATH + 'train_x.txt'  # per-sample feature values
TRAIN_Y = DATA_PATH + 'train_y.txt'  # per-sample labels
NUM_SPLITS = 10  # number of stratified folds
RANDOM_SEED = 2018  # seed for reproducible fold assignment
| 169 | 17.888889 | 35 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/Kfold_split/stratifiedKfold.py | #Email of the author: zjduan@pku.edu.cn
import numpy as np
import config
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
# NOTE(review): `scale` and `Column` appear unused in this script -- confirm before removing.
scale = ""
train_x_name = "train_x.npy"  # per-fold feature-value file name
train_y_name = "train_y.npy"  # per-fold label file name
Column = 13
def _load_data(_nrows=None, debug = False):
    """Load the feature-value matrix and labels from the paths in config.

    :param _nrows: optionally read only the first `_nrows` rows (debugging)
    :param debug: kept for interface compatibility; unused
    :return: (train_x [n_samples, n_fields] float array, train_y [n_samples] int array)
    """
    # np.float was removed in NumPy 1.24 -- use the explicit float64 alias instead.
    train_x = pd.read_csv(config.TRAIN_X, header=None, sep=' ', nrows=_nrows, dtype=np.float64)
    train_y = pd.read_csv(config.TRAIN_Y, header=None, sep=' ', nrows=_nrows, dtype=np.int32)
    train_x = train_x.values
    train_y = train_y.values.reshape([-1])
    print('data loading done!')
    print('training data : %d' % train_y.shape[0])
    assert train_x.shape[0] == train_y.shape[0]
    return train_x, train_y
def save_x_y(fold_index, train_x, train_y):
    """Save each fold's feature values and labels under DATA_PATH/part<i>/.

    :param fold_index: sequence of index arrays, one per fold
    :param train_x: full feature-value matrix
    :param train_y: full label array
    """
    _get = lambda x, l: [x[i] for i in l]
    for i in range(len(fold_index)):
        print("now part %d" % (i+1))
        part_index = fold_index[i]
        Xv_train_, y_train_ = _get(train_x, part_index), _get(train_y, part_index)
        # value and label files share the same directory; create it race-free
        save_dir = config.DATA_PATH + "part" + str(i+1) + "/"
        os.makedirs(save_dir, exist_ok=True)
        np.save(save_dir + train_x_name, Xv_train_)
        np.save(save_dir + train_y_name, y_train_)
def save_i(fold_index):
    """Persist the global feature count and each fold's feature-index rows."""
    train_i = pd.read_csv(config.TRAIN_I, header=None, sep=' ', nrows=None, dtype=np.int32)
    train_i = train_i.values
    feature_size = train_i.max() + 1
    print("feature_size = %d" % feature_size)
    # stored as a one-element array so train-time code can np.load it
    np.save(config.DATA_PATH + "feature_size.npy", np.array([feature_size]))
    print("train_i size: %d" % len(train_i))
    for part, part_index in enumerate(fold_index, start=1):
        print("now part %d" % part)
        Xi_train_ = [train_i[idx] for idx in part_index]
        np.save(config.DATA_PATH + "part" + str(part) + '/train_i.npy', Xi_train_)
def main():
    """Split the data into stratified folds, then save x/y and i arrays per fold."""
    train_x, train_y = _load_data()
    print('loading data done!')
    folds = list(StratifiedKFold(n_splits=10, shuffle=True,
                                 random_state=config.RANDOM_SEED).split(train_x, train_y))
    fold_index = []
    for i, (train_id, valid_id) in enumerate(folds):
        fold_index.append(valid_id)
    print("fold num: %d" % (len(fold_index)))
    # Folds can have unequal sizes, so this is a ragged array; modern NumPy
    # refuses to build object arrays implicitly -- be explicit.
    fold_index = np.array(fold_index, dtype=object)
    np.save(config.DATA_PATH + "fold_index.npy", fold_index)
    save_x_y(fold_index, train_x, train_y)
    print("save train_x_y done!")
    # object arrays are pickled on save, so reloading requires allow_pickle=True
    fold_index = np.load(config.DATA_PATH + "fold_index.npy", allow_pickle=True)
    save_i(fold_index)
    print("save index done!")
# Script entry point: run the full stratified K-fold split pipeline.
if __name__ == "__main__":
    main()
| 2,976 | 28.186275 | 90 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/KDD2012/scale.py | import math
import config
import numpy as np
def scale(x):
    """Log-squash counts: values above 2 become int(ln(x)**2); others pass through."""
    if x <= 2:
        return x
    return int(math.log(float(x)) ** 2)
def scale_each_fold():
    """Apply scale() to the first 13 columns of every fold and save as train_x2.npy."""
    for part in range(1, 11):
        print('now part %d' % part)
        data = np.load(config.DATA_PATH + 'part' + str(part) + '/train_x.npy')
        numeric = data[:, 0:13]  # view: writes go through to `data`
        for row in range(numeric.shape[0]):
            if row % 100000 == 0:
                print(row)
            numeric[row] = list(map(scale, numeric[row]))
        np.save(config.DATA_PATH + 'part' + str(part) + '/train_x2.npy', data)
if __name__ == '__main__':
scale_each_fold() | 582 | 23.291667 | 78 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/KDD2012/preprocess.py | #coding=utf-8
#Email of the author: zjduan@pku.edu.cn
'''
0. Click:
1. Impression(numerical)
2. DisplayURL: (categorical)
3. AdID:(categorical)
4. AdvertiserID:(categorical)
5. Depth:(numerical)
6. Position:(numerical)
7. QueryID: (categorical) the key of the data file 'queryid_tokensid.txt'.
8. KeywordID: (categorical)the key of 'purchasedkeyword_tokensid.txt'.
9. TitleID: (categorical)the key of 'titleid_tokensid.txt'.
10. DescriptionID: (categorical)the key of 'descriptionid_tokensid.txt'.
11. UserID: (categorical)the key of 'userid_profile.txt'
12. User's Gender: (categorical)
13. User's Age: (categorical)
'''
import math
# Build index/value/label text files for KDD2012: numerical fields keep their
# raw value; categorical fields are densified into integer indices, with
# rare values (seen < 10 times) collapsed onto a shared per-field index.
train_path = './training.txt'
f1 = open(train_path, 'r')
f2 = open('./userid_profile.txt', 'r')
dic = {}
f_train_value = open('./train_x.txt', 'w')
f_train_index = open('./train_i.txt', 'w')
f_train_label = open('./train_y.txt', 'w')
debug = False
tune = False
Column = 12
Field = 13
numr_feat = [1,5,6]
numerical = [0] * Column
cate_feat = [2,3,4,7,8,9,10,11]
index_cnt = 0
index_others = [0] * (Field + 1)  # per-field fallback index for rare/missing values
Max = [0] * 12  # running maximum of each numerical column
numerical[0] = -1  # column 0 is the label, neither numerical nor categorical
# reserve one shared index per numerical field
for i in numr_feat:
    index_others[i] = index_cnt
    index_cnt += 1
    numerical[i] = 1
# reserve one fallback index per categorical field
for i in cate_feat:
    index_others[i] = index_cnt
    index_cnt += 1
for i in range(Field + 1):
    dic[i] = dict()
###init user_dic: UserID -> [gender, age], with fields 12/13 indexed eagerly
user_dic = dict()
cnt_line = 0
for line in f2:
    cnt_line += 1
    if (cnt_line % 1000000 == 0):
        print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
    # if (debug == True):
    #     if (cnt_line >= 10000):
    #         break
    split = line.strip('\n').split('\t')
    user_dic[split[0]] = [split[1], split[2]]
    if (split[1] not in dic[12]):
        dic[12][split[1]] = [index_cnt, 0]
        index_cnt += 1
    if (split[2] not in dic[13]):
        dic[13][split[2]] = [index_cnt, 0]
        index_cnt += 1
cnt_line = 0
for line in f1:
    cnt_line += 1
    if (cnt_line % 1000000 == 0):
        print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
    if (debug == True):
        if (cnt_line >= 10000):
            break
    split = line.strip('\n').split('\t')
    # count categorical values; promote a value to its own index once it has
    # been seen 10 times (before that it shares the field's fallback index)
    for i in cate_feat:
        if (split[i] != ''):
            if split[i] not in dic[i]:
                dic[i][split[i]] = [index_others[i], 0]
            dic[i][split[i]][1] += 1
            if (dic[i][split[i]][0] == index_others[i] and dic[i][split[i]][1] == 10):
                dic[i][split[i]][0] = index_cnt
                index_cnt += 1
    if (tune == False):
        label = split[0]
        if (label != '0'): label = '1'  # binarize the click count
        index = [0] * Field
        value = ['0'] * Field
        for i in range(1, 12):
            if (numerical[i] == 1):
                index[i - 1] = index_others[i]
                if (split[i] != ''):
                    value[i - 1] = split[i]
                    Max[i] = max(int(split[i]), Max[i])
            else:
                if (split[i] != ''):
                    index[i - 1] = dic[i][split[i]][0]
                    value[i - 1] = '1'
                if (split[i] == ''):
                    value[i - 1] = '0'
                if (i == 11 and split[i] == '0'):
                    value[i - 1] = '0'  # UserID 0 means unknown user
        ### gender and age, looked up from the user profile
        if (split[11] == '' or (split[11] not in user_dic)):
            index[12 - 1] = index_others[12]
            value[12 - 1] = '0'
            index[13 - 1] = index_others[13]
            value[13 - 1] = '0'
        else:
            index[12 - 1] = dic[12][user_dic[split[11]][0]][0]
            value[12 - 1] = '1'
            index[13 - 1] = dic[13][user_dic[split[11]][1]][0]
            value[13 - 1] = '1'
        f_train_index.write(' '.join(str(i) for i in index) + '\n')
        f_train_value.write(' '.join(value) + '\n')
        f_train_label.write(label + '\n')
f1.close()
f_train_index.close()
f_train_value.close()
f_train_label.close()
print ("Finished!")
print ("index_cnt = %d" % index_cnt)
print ("max number for numerical features:")
for i in numr_feat:
    print ("no.:%d max: %d" % (i, Max[i]))
| 4,042 | 29.398496 | 86 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/Criteo/scale.py | import math
import config
import numpy as np
def scale(x):
    """Squash values above 2 to int(ln(x)**2); leave small values untouched."""
    return int(math.log(float(x)) ** 2) if x > 2 else x
def scale_each_fold():
    """Rewrite each fold's first 13 columns through scale() and store as train_x2.npy."""
    for i in range(1, 11):
        print('now part %d' % i)
        fold_dir = config.DATA_PATH + 'part' + str(i) + '/'
        data = np.load(fold_dir + 'train_x.npy')
        part = data[:, 0:13]  # view into data, edits write through
        for j in range(part.shape[0]):
            if j % 100000 == 0:
                print(j)
            part[j] = [scale(v) for v in part[j]]
        np.save(fold_dir + 'train_x2.npy', data)
if __name__ == '__main__':
scale_each_fold() | 582 | 23.291667 | 78 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/Criteo/config.py | DATA_PATH = './Criteo/'
SOURCE_DATA = './train_examples.txt' | 60 | 29.5 | 36 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/Criteo/preprocess.py | import config
# Build index/value/label text files for Criteo (39 fields: 13 numerical,
# 26 categorical). Categorical values seen more than 10 times get their own
# dense index; rarer values share index 1 within their field.
train_path = config.SOURCE_DATA
f1 = open(train_path,'r')
dic= {}
# generate three fold.
# train_x: value
# train_i: index
# train_y: label
f_train_value = open(config.DATA_PATH + 'train_x.txt','w')
f_train_index = open(config.DATA_PATH + 'train_i.txt','w')
f_train_label = open(config.DATA_PATH + 'train_y.txt','w')
for i in range(39):
    dic[i] = {}
cnt_train = 0
#for debug
#limits = 10000
index = [1] * 26  # next free per-field index (1 is reserved for rare values)
for line in f1:
    cnt_train +=1
    if cnt_train % 100000 ==0:
        print('now train cnt : %d\n' % cnt_train)
    #if cnt_train > limits:
    #	break
    split = line.strip('\n').split('\t')
    # 0-label, 1-13 numerical, 14-39 category
    for i in range(13,39):
        if split[i+1] not in dic[i]:
            # [1, 0] 1 is the index for those whose appear times <= 10   0 indicates the appear times
            dic[i][split[i+1]] = [1,0]
        dic[i][split[i+1]][1] += 1
        # promote the value to its own index once it has been seen > 10 times
        if dic[i][split[i+1]][0] == 1 and dic[i][split[i+1]][1] > 10:
            index[i-13] += 1
            dic[i][split[i+1]][0] = index[i-13]
f1.close()
print('total entries :%d\n' % (cnt_train - 1))
# calculate number of category features of every dimension
kinds = [13]
for i in range(13,39):
    kinds.append(index[i-13])
print('number of dimensions : %d' % (len(kinds)-1))
print(kinds)
# prefix sums: kinds[j] becomes the global index offset of categorical field j
for i in range(1,len(kinds)):
    kinds[i] += kinds[i-1]
print(kinds)
# make new data: second pass rewrites each sample as (index row, value row, label)
f1 = open(train_path,'r')
cnt_train = 0
print('remake training data...\n')
for line in f1:
    cnt_train +=1
    if cnt_train % 100000 ==0:
        print('now train cnt : %d\n' % cnt_train)
    #if cnt_train > limits:
    #	break
    entry = ['0'] * 39
    index = [None] * 39
    split = line.strip('\n').split('\t')
    label = str(split[0])
    # numerical fields: keep the raw value, index is just the column number
    for i in range(13):
        if split[i+1] != '':
            entry[i] = (split[i+1])
        index[i] = (i+1)
    # categorical fields: value 1/0 presence flag, index from the first pass
    for i in range(13,39):
        if split[i+1] != '':
            entry[i] = '1'
        index[i] = (dic[i][split[i+1]][0])
    # shift per-field indices into the global index space
    for j in range(26):
        index[13+j] += kinds[j]
    index = [str(item) for item in index]
    f_train_value.write(' '.join(entry)+'\n')
    f_train_index.write(' '.join(index)+'\n')
    f_train_label.write(label+'\n')
f1.close()
f_train_value.close()
f_train_index.close()
f_train_label.close()
| 2,286 | 24.696629 | 97 | py |
RecommenderSystems | RecommenderSystems-master/featureRec/data/Dataprocess/Avazu/preprocess.py | #coding=utf-8
#Email of the author: zjduan@pku.edu.cn
'''
0.id: ad identifier
1.click: 0/1 for non-click/click
2.hour: format is YYMMDDHH, so 14091123 means 23:00 on Sept. 11, 2014 UTC.
3.C1 -- anonymized categorical variable
4.banner_pos
5.site_id
6.site_domain
7.site_category
8.app_id
9.app_domain
10.app_category
11.device_id
12.device_ip
13.device_model
14.device_type
15.device_conn_type
16.C14
17.C15
18.C16
19.C17
20.C18
21.C19
22.C20
23.C21
'''
import pandas as pd
import math
# Build index/value/label text files for Avazu. All 23 non-label columns are
# treated as categorical; values seen at least Bound[i] times get their own
# dense index, rarer values share the field's fallback index.
train_path = './train.csv'
f1 = open(train_path, 'r')
dic = {}
f_train_value = open('./train_x.txt', 'w')
f_train_index = open('./train_i.txt', 'w')
f_train_label = open('./train_y.txt', 'w')
debug = False
tune = False
Bound = [5] * 24  # per-column promotion threshold
label_index = 1  # 'click' column
Column = 24
numr_feat = []  # Avazu has no numerical features
numerical = [0] * Column
numerical[label_index] = -1  # label column, neither numerical nor categorical
cate_feat = []
for i in range(Column):
    if (numerical[i] == 0):
        cate_feat.extend([i])
index_cnt = 0
index_others = [0] * Column  # per-column fallback index for rare/missing values
for i in numr_feat:
    index_others[i] = index_cnt
    index_cnt += 1
    numerical[i] = 1
for i in cate_feat:
    index_others[i] = index_cnt
    index_cnt += 1
for i in range(Column):
    dic[i] = dict()
cnt_line = 0
for line in f1:
    cnt_line += 1
    if (cnt_line == 1): continue # header
    if (cnt_line % 1000000 == 0):
        print ("cnt_line = %d, index_cnt = %d" % (cnt_line, index_cnt))
    if (debug == True):
        if (cnt_line >= 10000):
            break
    split = line.strip('\n').split(',')
    # count values; promote a value to its own index once it has been seen
    # Bound[i] times (before that it shares the column's fallback index)
    for i in cate_feat:
        if (split[i] != ''):
            if split[i] not in dic[i]:
                dic[i][split[i]] = [index_others[i], 0]
            dic[i][split[i]][1] += 1
            if (dic[i][split[i]][0] == index_others[i] and dic[i][split[i]][1] == Bound[i]):
                dic[i][split[i]][0] = index_cnt
                index_cnt += 1
    if (tune == False):
        label = split[label_index]
        if (label != '0'): label = '1'
        index = [0] * (Column - 1)
        value = ['0'] * (Column - 1)
        for i in range(Column):
            cur = i
            if (i == label_index): continue
            if (i > label_index): cur = i - 1  # skip the label column in the output row
            if (numerical[i] == 1):
                index[cur] = index_others[i]
                if (split[i] != ''):
                    value[cur] = split[i]
            else:
                if (split[i] != ''):
                    index[cur] = dic[i][split[i]][0]
                    value[cur] = '1'
                if (split[i] == ''):
                    value[cur] = '0'
        f_train_index.write(' '.join(str(i) for i in index) + '\n')
        f_train_value.write(' '.join(value) + '\n')
        f_train_label.write(label + '\n')
f1.close()
f_train_index.close()
f_train_value.close()
f_train_label.close()
print ("Finished!")
print ("index_cnt = %d" % index_cnt)
| 2,863 | 22.669421 | 92 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/data/preprocess_DoubanMovie.py | import pandas as pd
import numpy as np
import math
import argparse
import random
from collections import Counter
'''
The original DoubanMovie data can be found at:
https://www.dropbox.com/s/tmwuitsffn40vrz/Douban.tar.gz?dl=0
'''
# Input locations for the DoubanMovie dataset.
PATH_TO_DATA = './Douban/'
SOCIAL_NETWORK_FILE = PATH_TO_DATA + 'socialnet/socialnet.tsv'  # follower -> followee edge list
RATING_FILE = PATH_TO_DATA + 'movie/douban_movie.tsv'  # user / item / rating / timestamp table
max_length = 30  # sessions longer than this are dropped (may come from a fake user)
def process_rating(day=7): # segment session in every $day days.
    """Load ratings, keep those in [1,6] within a fixed timestamp span, and
    bucket each user's ratings into `day`-day sessions.

    Adds a TimeId column (window index since the earliest kept timestamp) and
    a SessionId column of the form "<UserId>_<TimeId>"; returns the DataFrame.
    """
    df = pd.read_csv(RATING_FILE, sep='\t', dtype={0:str, 1:str, 2:np.int32, 3: np.float32})
    # NOTE(review): Series.between(..., inclusive=True) was removed in pandas 2.0
    # (use inclusive='both' there) -- confirm the pinned pandas version.
    df = df[df['Rating'].between(1,6,inclusive=True)]
    # keep ratings between ~2008 and ~2017 (unix seconds)
    span_left = 1.2e9
    span_right = 1.485e9
    df = df[df['Timestamp'].between(span_left, span_right, inclusive=True)]
    min_timestamp = df['Timestamp'].min()
    time_id = [int(math.floor((t-min_timestamp) / (86400*day))) for t in df['Timestamp']]
    df['TimeId'] = time_id
    session_id = [str(uid)+'_'+str(tid) for uid, tid in zip(df['UserId'], df['TimeId'])]
    df['SessionId'] = session_id
    print('Statistics of user ratings:')
    print('\tNumber of total ratings: {}'.format(len(df)))
    print('\tNumber of users: {}'.format(df.UserId.nunique()))
    print('\tNumber of items: {}'.format(df.ItemId.nunique()))
    print('\tAverage ratings per user:{}'.format(df.groupby('UserId').size().mean()))
    return df
def process_social(): # read in social network.
    """Load the follower->followee edge list, drop duplicate edges, print
    basic statistics, and return the DataFrame."""
    net = pd.read_csv(SOCIAL_NETWORK_FILE, sep='\t', dtype={0:str, 1: str})
    net.drop_duplicates(subset=['Follower', 'Followee'], inplace=True)
    friend_size = net.groupby('Follower').size()
    #net = net[np.in1d(net.Follower, friend_size[friend_size>=5].index)]
    print('Statistics of social network:')
    print('\tTotal user in social network:{}.\n\tTotal edges(links) in social network:{}.'.format(\
        net.Follower.nunique(), len(net)))
    print('\tAverage number of friends for users: {}'.format(net.groupby('Follower').size().mean()))
    return net
def reset_id(data, id_map, column_name='UserId'):
    """Remap the ids in `column_name` through `id_map` (in place); when user
    ids change, rebuild SessionId from the new ids. Returns `data`."""
    data[column_name] = data[column_name].map(id_map)
    if column_name == 'UserId':
        # SessionId encodes (user, time-bucket), so it must track the new user ids
        data['SessionId'] = [str(uid) + '_' + str(tid)
                             for uid, tid in zip(data['UserId'], data['TimeId'])]
    return data
def split_data(day): #split data for training/validation/testing.
df_data = process_rating(day)
df_net = process_social()
df_net = df_net.loc[df_net['Follower'].isin(df_data['UserId'].unique())]
df_net = df_net.loc[df_net['Followee'].isin(df_data['UserId'].unique())]
df_data = df_data.loc[df_data['UserId'].isin(df_net.Follower.unique())]
#restrict session length in [2, max_length]. We set a max_length because too long sequence may come from a fake user.
df_data = df_data[df_data['SessionId'].groupby(df_data['SessionId']).transform('size')>1]
df_data = df_data[df_data['SessionId'].groupby(df_data['SessionId']).transform('size')<=max_length]
#length_supports = df_data.groupby('SessionId').size()
#df_data = df_data[np.in1d(df_data.SessionId, length_supports[length_supports<=max_length].index)]
# split train, test, valid.
tmax = df_data.TimeId.max()
session_max_times = df_data.groupby('SessionId').TimeId.max()
session_train = session_max_times[session_max_times < tmax - 26].index
session_holdout = session_max_times[session_max_times >= tmax - 26].index
train_tr = df_data[df_data['SessionId'].isin(session_train)]
holdout_data = df_data[df_data['SessionId'].isin(session_holdout)]
print('Number of train/test: {}/{}'.format(len(train_tr), len(holdout_data)))
train_tr = train_tr[train_tr['ItemId'].groupby(train_tr['ItemId']).transform('size')>=20]
train_tr = train_tr[train_tr['SessionId'].groupby(train_tr['SessionId']).transform('size')>1]
print('Item size in train data: {}'.format(train_tr['ItemId'].nunique()))
train_item_counter = Counter(train_tr.ItemId)
to_predict = Counter(el for el in train_item_counter.elements() if train_item_counter[el] >= 50).keys()
print('Size of to predict: {}'.format(len(to_predict)))
# split holdout to valid and test.
holdout_cn = holdout_data.SessionId.nunique()
holdout_ids = holdout_data.SessionId.unique()
np.random.shuffle(holdout_ids)
valid_cn = int(holdout_cn * 0.5)
session_valid = holdout_ids[0: valid_cn]
session_test = holdout_ids[valid_cn: ]
valid = holdout_data[holdout_data['SessionId'].isin(session_valid)]
test = holdout_data[holdout_data['SessionId'].isin(session_test)]
valid = valid[valid['ItemId'].isin(to_predict)]
valid = valid[valid['SessionId'].groupby(valid['SessionId']).transform('size')>1]
test = test[test['ItemId'].isin(to_predict)]
test = test[test['SessionId'].groupby(test['SessionId']).transform('size')>1]
total_df = pd.concat([train_tr, valid, test])
df_net = df_net.loc[df_net['Follower'].isin(total_df['UserId'].unique())]
df_net = df_net.loc[df_net['Followee'].isin(total_df['UserId'].unique())]
user_map = dict(zip(total_df.UserId.unique(), range(total_df.UserId.nunique())))
item_map = dict(zip(total_df.ItemId.unique(), range(1, 1+total_df.ItemId.nunique())))
with open('user_id_map.tsv', 'w') as fout:
for k, v in user_map.iteritems():
fout.write(str(k) + '\t' + str(v) + '\n')
with open('item_id_map.tsv', 'w') as fout:
for k, v in item_map.iteritems():
fout.write(str(k) + '\t' + str(v) + '\n')
num_users = len(user_map)
num_items = len(item_map)
reset_id(total_df, user_map)
reset_id(train_tr, user_map)
reset_id(valid, user_map)
reset_id(test, user_map)
reset_id(df_net, user_map, 'Follower')
reset_id(df_net, user_map, 'Followee')
reset_id(total_df, item_map, 'ItemId')
reset_id(train_tr, item_map, 'ItemId')
reset_id(valid, item_map, 'ItemId')
reset_id(test, item_map, 'ItemId')
print 'Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tAvg length: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique(), train_tr.groupby('SessionId').size().mean())
print 'Valid set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tAvg length: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique(), valid.groupby('SessionId').size().mean())
print 'Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tAvg length: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique(), test.groupby('SessionId').size().mean())
user2sessions = total_df.groupby('UserId')['SessionId'].apply(set).to_dict()
user_latest_session = []
for idx in xrange(num_users):
sessions = user2sessions[idx]
latest = []
for t in xrange(tmax+1):
if t == 0:
latest.append('NULL')
else:
sess_id_tmp = str(idx) + '_' + str(t-1)
if sess_id_tmp in sessions:
latest.append(sess_id_tmp)
else:
latest.append(latest[t-1])
user_latest_session.append(latest)
train_tr.to_csv('train.tsv', sep='\t', index=False)
valid.to_csv('valid.tsv', sep='\t', index=False)
test.to_csv('test.tsv', sep='\t', index=False)
df_net.to_csv('adj.tsv', sep='\t', index=False)
with open('latest_sessions.txt', 'w') as fout:
for idx in xrange(num_users):
fout.write(','.join(user_latest_session[idx]) + '\n')
if __name__ == '__main__':
day = 7
split_data(day)
| 7,559 | 46.54717 | 206 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/inits.py | import tensorflow as tf
import numpy as np
# DISCLAIMER:
# This file is derived from
# https://github.com/tkipf/gcn
# which is also under the MIT license
def uniform(shape, scale=0.05, name=None):
"""Uniform init."""
initial = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
return tf.Variable(initial, name=name)
def glorot(shape, name=None):
"""Glorot & Bengio (AISTATS 2010) init."""
init_range = np.sqrt(6.0/(shape[0]+shape[1]))
initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name)
def zeros(shape, name=None):
"""All zeros."""
initial = tf.zeros(shape, dtype=tf.float32)
return tf.Variable(initial, name=name)
def ones(shape, name=None):
"""All ones."""
initial = tf.ones(shape, dtype=tf.float32)
return tf.Variable(initial, name=name)
| 903 | 28.16129 | 95 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/aggregators.py | import tensorflow as tf
from .layers import Layer, Dense
from .inits import glorot, zeros
# Mean, MaxPool, GCN aggregators are collected from
# https://github.com/williamleif/GraphSAGE
# which is also under the MIT license
class MeanAggregator(Layer):
"""
Aggregates via mean followed by matmul and non-linearity.
"""
def __init__(self, input_dim, output_dim, neigh_input_dim=None,
dropout=0., bias=False, act=tf.nn.relu,
name=None, concat=False, **kwargs):
super(MeanAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if neigh_input_dim is None:
neigh_input_dim = input_dim
if name is not None:
name = '/' + name
else:
name = ''
with tf.variable_scope(self.name + name + '_vars'):
self.vars['neigh_weights'] = glorot([neigh_input_dim, output_dim],
name='neigh_weights')
self.vars['self_weights'] = glorot([input_dim, output_dim],
name='self_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
def _call(self, inputs):
self_vecs, neigh_vecs = inputs
neigh_vecs = tf.nn.dropout(neigh_vecs, 1-self.dropout)
self_vecs = tf.nn.dropout(self_vecs, 1-self.dropout)
neigh_means = tf.reduce_mean(neigh_vecs, axis=1)
# [nodes] x [out_dim]
from_neighs = tf.matmul(neigh_means, self.vars['neigh_weights'])
from_self = tf.matmul(self_vecs, self.vars["self_weights"])
if not self.concat:
output = tf.add_n([from_self, from_neighs])
else:
output = tf.concat([from_self, from_neighs], axis=1)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GCNAggregator(Layer):
"""
Aggregates via mean followed by matmul and non-linearity.
Same matmul parameters are used self vector and neighbor vectors.
"""
def __init__(self, input_dim, output_dim, neigh_input_dim=None,
dropout=0., bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(GCNAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if neigh_input_dim is None:
neigh_input_dim = input_dim
if name is not None:
name = '/' + name
else:
name = ''
with tf.variable_scope(self.name + name + '_vars'):
self.vars['weights'] = glorot([neigh_input_dim, output_dim],
name='neigh_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
def _call(self, inputs):
self_vecs, neigh_vecs = inputs
neigh_vecs = tf.nn.dropout(neigh_vecs, 1-self.dropout)
self_vecs = tf.nn.dropout(self_vecs, 1-self.dropout)
means = tf.reduce_mean(tf.concat([neigh_vecs,
tf.expand_dims(self_vecs, axis=1)], axis=1), axis=1)
# [nodes] x [out_dim]
output = tf.matmul(means, self.vars['weights'])
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class AttentionAggregator(Layer):
def __init__(self, input_dim, output_dim, neigh_input_dim=None,
dropout=0., bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(AttentionAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if neigh_input_dim is None:
neigh_input_dim = input_dim
if name is not None:
name = '/' + name
else:
name = ''
with tf.variable_scope(self.name + name + '_vars'):
self.vars['weights'] = glorot([neigh_input_dim, output_dim],
name='neigh_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='neigh_bias')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
def _call(self, inputs):
self_vecs, neigh_vecs = inputs
neigh_vecs = tf.nn.dropout(neigh_vecs, 1-self.dropout)
self_vecs = tf.nn.dropout(self_vecs, 1-self.dropout)
# Reshape from [batch_size, depth] to [batch_size, 1, depth] for matmul.
query = tf.expand_dims(self_vecs, 1)
neigh_self_vecs = tf.concat([neigh_vecs, query], axis=1)
score = tf.matmul(query, neigh_self_vecs, transpose_b=True)
score = tf.nn.softmax(score, dim=-1)
# alignment(score) shape is [batch_size, 1, depth]
context = tf.matmul(score, neigh_self_vecs)
context = tf.squeeze(context, [1])
# [nodes] x [out_dim]
output = tf.matmul(context, self.vars['weights'])
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class MaxPoolingAggregator(Layer):
""" Aggregates via max-pooling over MLP functions.
"""
def __init__(self, input_dim, output_dim, model_size="small", neigh_input_dim=None,
dropout=0., bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(MaxPoolingAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if neigh_input_dim is None:
neigh_input_dim = input_dim
if name is not None:
name = '/' + name
else:
name = ''
if model_size == "small":
hidden_dim = self.hidden_dim = 512
elif model_size == "big":
hidden_dim = self.hidden_dim = 1024
self.mlp_layers = []
self.mlp_layers.append(Dense(input_dim=neigh_input_dim,
output_dim=hidden_dim,
act=tf.nn.relu,
dropout=dropout,
sparse_inputs=False,
logging=self.logging))
with tf.variable_scope(self.name + name + '_vars'):
self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
name='neigh_weights')
self.vars['self_weights'] = glorot([input_dim, output_dim],
name='self_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
self.neigh_input_dim = neigh_input_dim
def _call(self, inputs):
self_vecs, neigh_vecs = inputs
neigh_h = neigh_vecs
dims = tf.shape(neigh_h)
batch_size = dims[0]
num_neighbors = dims[1]
# [nodes * sampled neighbors] x [hidden_dim]
h_reshaped = tf.reshape(neigh_h, (batch_size * num_neighbors, self.neigh_input_dim))
for l in self.mlp_layers:
h_reshaped = l(h_reshaped)
neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim))
neigh_h = tf.reduce_max(neigh_h, axis=1)
from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
from_self = tf.matmul(self_vecs, self.vars["self_weights"])
if not self.concat:
output = tf.add_n([from_self, from_neighs])
else:
output = tf.concat([from_self, from_neighs], axis=1)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class MeanPoolingAggregator(Layer):
""" Aggregates via mean-pooling over MLP functions.
"""
def __init__(self, input_dim, output_dim, model_size="small", neigh_input_dim=None,
dropout=0., bias=False, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(MeanPoolingAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
if neigh_input_dim is None:
neigh_input_dim = input_dim
if name is not None:
name = '/' + name
else:
name = ''
if model_size == "small":
hidden_dim = self.hidden_dim = 512
elif model_size == "big":
hidden_dim = self.hidden_dim = 1024
self.mlp_layers = []
self.mlp_layers.append(Dense(input_dim=neigh_input_dim,
output_dim=hidden_dim,
act=tf.nn.relu,
dropout=dropout,
sparse_inputs=False,
logging=self.logging))
with tf.variable_scope(self.name + name + '_vars'):
self.vars['neigh_weights'] = glorot([hidden_dim, output_dim],
name='neigh_weights')
self.vars['self_weights'] = glorot([input_dim, output_dim],
name='self_weights')
if self.bias:
self.vars['bias'] = zeros([self.output_dim], name='bias')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
self.neigh_input_dim = neigh_input_dim
def _call(self, inputs):
self_vecs, neigh_vecs = inputs
neigh_h = neigh_vecs
dims = tf.shape(neigh_h)
batch_size = dims[0]
num_neighbors = dims[1]
# [nodes * sampled neighbors] x [hidden_dim]
h_reshaped = tf.reshape(neigh_h, (batch_size * num_neighbors, self.neigh_input_dim))
for l in self.mlp_layers:
h_reshaped = l(h_reshaped)
neigh_h = tf.reshape(h_reshaped, (batch_size, num_neighbors, self.hidden_dim))
neigh_h = tf.reduce_mean(neigh_h, axis=1)
from_neighs = tf.matmul(neigh_h, self.vars['neigh_weights'])
from_self = tf.matmul(self_vecs, self.vars["self_weights"])
if not self.concat:
output = tf.add_n([from_self, from_neighs])
else:
output = tf.concat([from_self, from_neighs], axis=1)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
| 11,210 | 32.168639 | 92 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/test.py | #coding=utf-8
from __future__ import division
from __future__ import print_function
import os, sys
import argparse
import tensorflow as tf
import numpy as np
import time
from .utils import *
from .minibatch import MinibatchIterator
from .model import DGRec
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
def evaluate(sess, model, minibatch, val_or_test='val'):
epoch_val_cost = []
epoch_val_recall = []
epoch_val_ndcg = []
epoch_val_point = []
input_str = []
while not minibatch.end_val(val_or_test):
feed_dict = minibatch.next_val_minibatch_feed_dict(val_or_test)
x = np.reshape(feed_dict[minibatch.placeholders['input_x']], -1).tolist()
x_str = '_'.join([str(v) for v in x if v !=0])
input_str.append(x_str)
outs = sess.run([model.loss,model.sum_recall, model.sum_ndcg, model.point_count], feed_dict=feed_dict)
epoch_val_cost.append(outs[0])
epoch_val_recall.append(outs[1])
epoch_val_ndcg.append(outs[2])
epoch_val_point.append(outs[3])
return [np.mean(epoch_val_cost), np.sum(epoch_val_recall) / np.sum(epoch_val_point), np.sum(epoch_val_ndcg) / np.sum(epoch_val_point), epoch_val_recall, epoch_val_ndcg, input_str]
def construct_placeholders(args):
# Define placeholders
placeholders = {
'input_x': tf.placeholder(tf.int32, shape=(args.batch_size, args.max_length), name='input_session'),
'input_y': tf.placeholder(tf.int32, shape=(args.batch_size, args.max_length), name='output_session'),
'mask_y': tf.placeholder(tf.float32, shape=(args.batch_size, args.max_length), name='mask_x'),
'support_nodes_layer1': tf.placeholder(tf.int32, shape=(args.batch_size*args.samples_1*args.samples_2), name='support_nodes_layer1'),
'support_nodes_layer2': tf.placeholder(tf.int32, shape=(args.batch_size*args.samples_2), name='support_nodes_layer2'),
'support_sessions_layer1': tf.placeholder(tf.int32, shape=(args.batch_size*args.samples_1*args.samples_2,\
args.max_length), name='support_sessions_layer1'),
'support_sessions_layer2': tf.placeholder(tf.int32, shape=(args.batch_size*args.samples_2,\
args.max_length), name='support_sessions_layer2'),
'support_lengths_layer1': tf.placeholder(tf.int32, shape=(args.batch_size*args.samples_1*args.samples_2),
name='support_lengths_layer1'),
'support_lengths_layer2': tf.placeholder(tf.int32, shape=(args.batch_size*args.samples_2),
name='support_lengths_layer2'),
}
return placeholders
def test(args, data):
adj_info = data[0]
latest_per_user_by_time = data[1]
user_id_map = data[2]
item_id_map = data[3]
train_df = data[4]
valid_df = data[5]
test_df = data[6]
args.num_items = len(item_id_map) + 1
args.num_users = len(user_id_map)
args.batch_size = 1
placeholders = construct_placeholders(args)
minibatch = MinibatchIterator(adj_info,
latest_per_user_by_time,
[train_df, valid_df, test_df],
placeholders,
batch_size=args.batch_size,
max_degree=args.max_degree,
num_nodes=len(user_id_map),
max_length=args.max_length,
samples_1_2=[args.samples_1, args.samples_2],
training=False)
dgrec = DGRec(args, minibatch.sizes, placeholders)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
ckpt = tf.train.get_checkpoint_state(args.ckpt_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print('Restore model from {}!'.format(args.ckpt_dir))
else:
print('Failed to restore model from {}'.format(args.ckpt_dir))
sys.exit(0)
ret = evaluate(sess, dgrec, minibatch, "test")
print("Test results(batch_size=1):",
"\tloss=", "{:.5f}".format(ret[0]),
"\trecall@20=", "{:.5f}".format(ret[1]),
"\tndcg=", "{:.5f}".format(ret[2]),
)
recall = ret[-3]
ndcg = ret[-2]
x_strs = ret[-1]
with open('metric_dist.txt','w') as f:
for idx in range(len(ret[-1])):
f.write(x_strs[idx] + '\t' + str(recall[idx]) + '\t' + str(ndcg[idx]) + '\n')
class Args():
training = False
global_only = False
local_only = False
epochs = 20
aggregator_type='attn'
act='linear'
batch_size = 200
max_degree = 50
num_users = -1
num_items = 100
concat=False
learning_rate=0.001
hidden_size = 100
embedding_size = 100
emb_user = 100
max_length=20
samples_1=10
samples_2=5
dim1 = 100
dim2 = 100
model_size = 'small'
dropout = 0.
weight_decay = 0.
print_every = 100
val_every = 500
ckpt_dir = 'save/'
def parseArgs():
args = Args()
parser = argparse.ArgumentParser(description='DGRec args')
parser.add_argument('--batch', default=200, type=int)
parser.add_argument('--model', default='attn', type=str)
parser.add_argument('--act', default='relu', type=str)
parser.add_argument('--degree', default=50, type=int)
parser.add_argument('--lr', default=0.002, type=float)
parser.add_argument('--hidden', default=100, type=int)
parser.add_argument('--embi', default=50, type=int)
parser.add_argument('--embu', default=50, type=int)
parser.add_argument('--samples1', default=10, type=int)
parser.add_argument('--samples2', default=5, type=int)
parser.add_argument('--dim1', default=100, type=int)
parser.add_argument('--dim2', default=100, type=int)
parser.add_argument('--dropout', default=0., type=float)
parser.add_argument('--l2', default=0., type=float)
parser.add_argument('--decay_steps', default=400, type=int)
parser.add_argument('--decay_rate', default=0.98, type=float)
parser.add_argument('--local', default=0, type=int)
parser.add_argument('--glb', default=0, type=int)
new_args = parser.parse_args()
args.batch_size = new_args.batch
args.aggregator_type= new_args.model
args.act= new_args.act
args.max_degree = new_args.degree
args.learning_rate = new_args.lr
args.hidden_size = new_args.hidden
args.embedding_size = new_args.embi
args.emb_user = new_args.embu
args.samples_1 = new_args.samples1
args.samples_2 = new_args.samples2
args.dim1 = new_args.dim1
args.dim2 = new_args.dim2
args.dropout = new_args.dropout
args.weight_decay = new_args.l2
args.decay_steps = new_args.decay_steps
args.decay_rate = new_args.decay_rate
args.local_only = new_args.local
args.global_only = new_args.glb
args.ckpt_dir = args.ckpt_dir + 'dgrec_batch{}'.format(args.batch_size)
args.ckpt_dir = args.ckpt_dir + '_model{}'.format(args.aggregator_type)
args.ckpt_dir = args.ckpt_dir + '_act{}'.format(args.act)
args.ckpt_dir = args.ckpt_dir + '_maxdegree{}'.format(args.max_degree)
args.ckpt_dir = args.ckpt_dir + '_lr{}'.format(args.learning_rate)
args.ckpt_dir = args.ckpt_dir + '_hidden{}'.format(args.hidden_size)
args.ckpt_dir = args.ckpt_dir + '_embi{}'.format(args.embedding_size)
args.ckpt_dir = args.ckpt_dir + '_embu{}'.format(args.emb_user)
args.ckpt_dir = args.ckpt_dir + '_samples1st{}'.format(args.samples_1)
args.ckpt_dir = args.ckpt_dir + '_samples2nd{}'.format(args.samples_2)
args.ckpt_dir = args.ckpt_dir + '_dim1st{}'.format(args.dim1)
args.ckpt_dir = args.ckpt_dir + '_dim2nd{}'.format(args.dim2)
args.ckpt_dir = args.ckpt_dir + '_dropout{}'.format(args.dropout)
args.ckpt_dir = args.ckpt_dir + '_l2reg{}'.format(args.weight_decay)
args.ckpt_dir = args.ckpt_dir + '_decaysteps{}'.format(args.decay_steps)
args.ckpt_dir = args.ckpt_dir + '_decayrate{}'.format(args.decay_rate)
args.ckpt_dir = args.ckpt_dir + '_global{}'.format(new_args.glb)
args.ckpt_dir = args.ckpt_dir + '_local{}'.format(new_args.local)
return args
def main(argv=None):
args = parseArgs()
print('Loading data..')
data = load_data('data/')
print("Done loading data..")
test(args, data)
if __name__ == '__main__':
tf.app.run()
| 8,420 | 39.485577 | 183 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/utils.py | #coding=utf-8
from __future__ import print_function
import numpy as np
import pandas as pd
import random
def load_adj(data_path):
df_adj = pd.read_csv(data_path + '/adj.tsv', sep='\t', dtype={0:np.int32, 1:np.int32})
return df_adj
def load_latest_session(data_path):
ret = []
for line in open(data_path + '/latest_sessions.txt'):
chunks = line.strip().split(',')
ret.append(chunks)
return ret
def load_map(data_path, name='user'):
if name == 'user':
file_path = data_path + '/user_id_map.tsv'
elif name == 'item':
file_path = data_path + '/item_id_map.tsv'
else:
raise NotImplementedError
id_map = {}
for line in open(file_path):
k, v = line.strip().split('\t')
id_map[k] = str(v)
return id_map
def load_data(data_path):
adj = load_adj(data_path)
latest_sessions = load_latest_session(data_path)
user_id_map = load_map(data_path, 'user')
item_id_map = load_map(data_path, 'item')
train = pd.read_csv(data_path + '/train.tsv', sep='\t', dtype={0:np.int32, 1:np.int32, 3:np.float32})
valid = pd.read_csv(data_path + '/valid.tsv', sep='\t', dtype={0:np.int32, 1:np.int32, 3:np.float32})
test = pd.read_csv(data_path + '/test.tsv', sep='\t', dtype={0:np.int32, 1:np.int32, 3:np.float32})
return [adj, latest_sessions, user_id_map, item_id_map, train, valid, test]
if __name__ == '__main__':
# test data loading.
data_path = 'path/to/data/'
data = load_data(data_path)
| 1,519 | 31.340426 | 105 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/model.py | import tensorflow as tf
import numpy as np
from .aggregators import *
from .layers import Dense
class DGRec(object):
def __init__(self, args, support_sizes, placeholders):
self.support_sizes = support_sizes
if args.aggregator_type == "mean":
self.aggregator_cls = MeanAggregator
elif args.aggregator_type == "seq":
self.aggregator_cls = SeqAggregator
elif args.aggregator_type == "maxpool":
self.aggregator_cls = MaxPoolingAggregator
elif args.aggregator_type == "meanpool":
self.aggregator_cls = MeanPoolingAggregator
elif args.aggregator_type == "gcn":
self.aggregator_cls = GCNAggregator
elif args.aggregator_type == "attn":
self.aggregator_cls = AttentionAggregator
else:
raise Exception("Unknown aggregator: ", self.aggregator_cls)
self.input_x = placeholders['input_x']
self.input_y = placeholders['input_y']
self.mask_y = placeholders['mask_y']
self.mask = tf.cast(self.mask_y, dtype=tf.float32)
self.point_count = tf.reduce_sum(self.mask)
self.support_nodes_layer1 = placeholders['support_nodes_layer1']
self.support_nodes_layer2 = placeholders['support_nodes_layer2']
self.support_sessions_layer1 = placeholders['support_sessions_layer1']
self.support_sessions_layer2 = placeholders['support_sessions_layer2']
self.support_lengths_layer1 = placeholders['support_lengths_layer1']
self.support_lengths_layer2 = placeholders['support_lengths_layer2']
self.training = args.training
self.concat = args.concat
if args.act == 'linear':
self.act = lambda x:x
elif args.act == 'relu':
self.act = tf.nn.relu
elif args.act == 'elu':
self.act = tf.nn.elu
else:
raise NotImplementedError
self.batch_size = args.batch_size
self.hidden_size = args.hidden_size
self.samples_1 = args.samples_1
self.samples_2 = args.samples_2
self.num_samples = [self.samples_1, self.samples_2]
self.n_items = args.num_items
self.n_users = args.num_users
self.emb_item = args.embedding_size
self.emb_user = args.emb_user
self.max_length = args.max_length
self.model_size = args.model_size
self.dropout = args.dropout
self.dim1 = args.dim1
self.dim2 = args.dim2
self.weight_decay = args.weight_decay
self.global_only = args.global_only
self.local_only = args.local_only
self.dims = [self.hidden_size, args.dim1, args.dim2]
self.dense_layers = []
self.loss = 0
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.lr = tf.maximum(1e-5, tf.train.exponential_decay(args.learning_rate,
self.global_step,
args.decay_steps,
args.decay_rate,
staircase=True))
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.build()
def global_features(self):
self.user_embedding = tf.get_variable('user_embedding', [self.n_users, self.emb_user],\
initializer=tf.glorot_uniform_initializer())
feature_layer1 = tf.nn.embedding_lookup(self.user_embedding, self.support_nodes_layer1)
feature_layer2 = tf.nn.embedding_lookup(self.user_embedding, self.support_nodes_layer2)
dense_layer = Dense(self.emb_user,
self.hidden_size if self.global_only else self.hidden_size // 2,
act=tf.nn.relu,
dropout=self.dropout if self.training else 0.)
self.dense_layers.append(dense_layer)
feature_layer1 = dense_layer(feature_layer1)
feature_layer2 = dense_layer(feature_layer2)
return [feature_layer2, feature_layer1]
def local_features(self):
'''
Use the same rnn in decode function
'''
initial_state_layer1 = self.lstm_cell.zero_state(self.batch_size*self.samples_1*self.samples_2, dtype=tf.float32)
initial_state_layer2 = self.lstm_cell.zero_state(self.batch_size*self.samples_2, dtype=tf.float32)
inputs_1 = tf.nn.embedding_lookup(self.embedding, self.support_sessions_layer1)
inputs_2 = tf.nn.embedding_lookup(self.embedding, self.support_sessions_layer2)
outputs1, states1 = tf.nn.dynamic_rnn(cell=self.lstm_cell,
inputs=inputs_1,
sequence_length=self.support_lengths_layer1,
initial_state=initial_state_layer1,
dtype=tf.float32)
outputs2, states2 = tf.nn.dynamic_rnn(cell=self.lstm_cell,
inputs=inputs_2,
sequence_length=self.support_lengths_layer2,
initial_state=initial_state_layer2,
dtype=tf.float32)
# outputs: shape[batch_size, max_time, depth]
local_layer1 = states1.h
local_layer2 = states2.h
dense_layer = Dense(self.hidden_size,
self.hidden_size if self.local_only else self.hidden_size // 2,
act=tf.nn.relu,
dropout=self.dropout if self.training else 0.)
self.dense_layers.append(dense_layer)
local_layer1 = dense_layer(local_layer1)
local_layer2 = dense_layer(local_layer2)
return [local_layer2, local_layer1]
def global_and_local_features(self):
#global features
global_feature_layer2, global_feature_layer1 = self.global_features()
local_feature_layer2, local_feature_layer1 = self.local_features()
global_local_layer2 = tf.concat([global_feature_layer2, local_feature_layer2], -1)
global_local_layer1 = tf.concat([global_feature_layer1, local_feature_layer1], -1)
return [global_local_layer2, global_local_layer1]
def aggregate(self, hidden, dims, num_samples, support_sizes,
aggregators=None, name=None, concat=False, model_size="small"):
""" At each layer, aggregate hidden representations of neighbors to compute the hidden representations
at next layer.
Args:
samples: a list of samples of variable hops away for convolving at each layer of the
network. Length is the number of layers + 1. Each is a vector of node indices.
input_features: the input features for each sample of various hops away.
dims: a list of dimensions of the hidden representations from the input layer to the
final layer. Length is the number of layers + 1.
num_samples: list of number of samples for each layer.
support_sizes: the number of nodes to gather information from for each layer.
batch_size: the number of inputs (different for batch inputs and negative samples).
Returns:
The hidden representation at the final layer for all nodes in batch
"""
# length: number of layers + 1
hidden = hidden
new_agg = aggregators is None
if new_agg:
aggregators = []
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1:
aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act=lambda x : x,
dropout=self.dropout if self.training else 0.,
name=name, concat=concat, model_size=model_size)
else:
aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act=self.act,
dropout=self.dropout if self.training else 0.,
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [self.batch_size * support_sizes[hop],
num_samples[len(num_samples) - hop - 1],
dim_mult*dims[layer]]
h = aggregator((hidden[hop],
tf.reshape(hidden[hop + 1], neigh_dims)))
next_hidden.append(h)
hidden = next_hidden
return hidden[0], aggregators
def decode(self):
self.lstm_cell = lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.hidden_size)
initial_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
time_major_x = tf.transpose(self.input_x)
inputs = tf.nn.embedding_lookup(self.embedding, time_major_x)
outputs, state = tf.nn.dynamic_rnn(cell=lstm_cell,
inputs=inputs,
initial_state=initial_state,
time_major=True,
dtype=tf.float32,
scope='decode_rnn')
# outputs: shape[max_time, batch_size, depth]
slices = tf.split(outputs, num_or_size_splits=self.max_length, axis=0)
return [tf.squeeze(t,[0]) for t in slices]
def step_by_step(self, features_0, features_1_2, dims, num_samples, support_sizes,
aggregators=None, name=None, concat=False, model_size="small"):
self.aggregators = None
outputs = []
for feature0 in features_0:
hidden = [feature0, features_1_2[0], features_1_2[1]]
output1, self.aggregators = self.aggregate(hidden, dims, num_samples, support_sizes,
aggregators=self.aggregators, concat=concat, model_size=self.model_size)
outputs.append(output1)
return tf.stack(outputs, axis=0)
def build(self):
self.embedding = embedding = tf.get_variable('item_embedding', [self.n_items, self.emb_item],\
initializer=tf.glorot_uniform_initializer())
features_0 = self.decode() # features of zero layer nodes.
#outputs with shape [max_time, batch_size, dim2]
if self.global_only:
features_1_2 = self.global_features()
elif self.local_only:
features_1_2 = self.local_features()
else:
features_1_2 = self.global_and_local_features()
outputs = self.step_by_step(features_0, features_1_2, self.dims, self.num_samples, self.support_sizes,
concat=self.concat)
concat_self = tf.concat([features_0, outputs], axis=-1)
# exchange first two dimensions.
self.transposed_outputs = tf.transpose(concat_self, [1,0,2])
self.loss = self._loss()
self.sum_recall = self._recall()
self.sum_ndcg = self._ndcg()
grads_and_vars = self.optimizer.compute_gradients(self.loss)
clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
for grad, var in grads_and_vars]
self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)
def _loss(self):
reg_loss = 0.
xe_loss = 0.
fc_layer = Dense(self.dim2 + self.hidden_size, self.emb_item, act=lambda x:x, dropout=self.dropout if self.training else 0.)
self.dense_layers.append(fc_layer)
self.logits = logits = tf.matmul(fc_layer(tf.reshape(self.transposed_outputs, [-1, self.dim2+self.hidden_size])), self.embedding, transpose_b=True)
for dense_layer in self.dense_layers:
for var in dense_layer.vars.values():
reg_loss += self.weight_decay * tf.nn.l2_loss(var)
for aggregator in self.aggregators:
for var in aggregator.vars.values():
reg_loss += self.weight_decay * tf.nn.l2_loss(var)
reshaped_logits = tf.reshape(logits, [self.batch_size, self.max_length, self.n_items])
xe_loss += tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y,
logits=reshaped_logits,
name='softmax_loss')
xe_loss *= self.mask
return tf.reduce_sum(xe_loss) / self.point_count + reg_loss
    def _ndcg(self):
        """Sum of per-point NDCG (1 / log2(1 + rank of the target item))
        over all valid (non-padded) positions."""
        predictions = tf.transpose(self.logits)
        targets = tf.reshape(self.input_y, [-1])
        # Score of each point's own target item: row b of
        # embedding_lookup(predictions, targets) holds item targets[b]'s
        # scores for all points, so the diagonal is score[b, targets[b]].
        # NOTE(review): this materializes a [B, B] matrix (B = batch*max_length);
        # correct but memory-hungry — tf.gather_nd would avoid it. Confirm
        # before changing.
        pred_values = tf.expand_dims(tf.diag_part(tf.nn.embedding_lookup(predictions, targets)), -1)
        tile_pred_values = tf.tile(pred_values, [1, self.n_items-1])
        # rank = 1 + number of items scored strictly above the target; the
        # [:, 1:] slice drops item id 0 (presumably the padding item — verify).
        ranks = tf.reduce_sum(tf.cast(self.logits[:,1:] > tile_pred_values, dtype=tf.float32), -1) + 1
        ndcg = 1. / (log2(1.0 + ranks))
        mask = tf.reshape(self.mask, [-1])
        # Padded positions contribute zero.
        ndcg *= mask
        return tf.reduce_sum(ndcg)
def _recall(self):
predictions = self.logits
targets = tf.reshape(self.input_y, [-1])
recall_at_k = tf.nn.in_top_k(predictions, targets, k=20)
recall_at_k = tf.cast(recall_at_k, dtype=tf.float32)
mask = tf.reshape(self.mask, [-1])
recall_at_k *= mask
return tf.reduce_sum(recall_at_k)
def log2(x):
    """Element-wise base-2 logarithm, computed from natural logarithms."""
    ln_x = tf.log(x)
    ln_two = tf.log(tf.constant(2, dtype=ln_x.dtype))
    return ln_x / ln_two
| 14,311 | 50.855072 | 155 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/minibatch.py | #coding=utf-8
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import sys
from .neigh_samplers import UniformNeighborSampler
from .utils import *
np.random.seed(123)
class MinibatchIterator(object):
    """Minibatch iterator over session data for the DGRec model.

    Builds adjacency tables from the social graph, filters out sessions whose
    owners lack usable (second-order) friends at session time, pads every
    session to a fixed length, and assembles feed_dicts containing sampled
    friends plus those friends' most recent sessions for both layers.

    Fixes vs. previous revision:
    - end_val() no longer resets batch_num_val when finishing the *test* split.
    - construct_adj / construct_test_adj share one builder (_build_adj).
    - samples_1_2 default is a tuple (mutable-default pitfall); list args from
      callers still work since it is only indexed.
    """
    def __init__(self,
                adj_info, # pandas dataframe with 'Follower'/'Followee' columns.
                latest_sessions, # per-user list: time step -> latest session id ('NULL' before first activity).
                data, # [train_df, valid_df, test_df] dataframes.
                placeholders,
                batch_size,
                max_degree,
                num_nodes,
                max_length=20,
                samples_1_2=(10, 5),
                training=True):
        self.num_layers = 2 # Currently, only 2 layer is supported.
        self.adj_info = adj_info
        self.latest_sessions = latest_sessions
        self.training = training
        self.train_df, self.valid_df, self.test_df = data
        self.all_data = pd.concat(data)
        self.placeholders = placeholders
        self.batch_size = batch_size
        self.max_degree = max_degree
        self.num_nodes = num_nodes
        self.max_length = max_length
        self.samples_1_2 = samples_1_2
        # Sampled nodes per layer: [self, 2nd layer, 1st layer].
        self.sizes = [1, samples_1_2[1], samples_1_2[1]*samples_1_2[0]]
        self.visible_time = self.user_visible_time()
        self.test_adj, self.test_deg = self.construct_test_adj()
        if self.training:
            self.adj, self.deg = self.construct_adj()
            self.train_session_ids = self._remove_infoless(self.train_df, self.adj, self.deg)
            self.valid_session_ids = self._remove_infoless(self.valid_df, self.test_adj, self.test_deg)
            self.sampler = UniformNeighborSampler(self.adj, self.visible_time, self.deg)
        self.test_session_ids = self._remove_infoless(self.test_df, self.test_adj, self.test_deg)
        self.padded_data, self.mask = self._padding_sessions(self.all_data)
        self.test_sampler = UniformNeighborSampler(self.test_adj, self.visible_time, self.test_deg)
        self.batch_num = 0
        self.batch_num_val = 0
        self.batch_num_test = 0

    def user_visible_time(self):
        '''
        Find out when each user is 'visible' to her friends, i.e., every user's first click/watching time.
        '''
        visible_time = []
        for l in self.latest_sessions:
            # First time step after the trailing 'NULL' prefix.
            timeid = max(loc for loc, val in enumerate(l) if val == 'NULL') + 1
            visible_time.append(timeid)
            assert timeid > 0 and timeid < len(l), 'Wrong when create visible time {}'.format(timeid)
        return visible_time

    def _remove_infoless(self, data, adj, deg):
        '''
        Remove users who have no sufficient friends.

        A session is kept iff its owner has at least one friend who (a) was
        already active at the session's time step, (b) has nonzero degree and
        (c) has at least one second-order friend active at that time: the
        cn_1/cn_2 counters break out early once such a friend is found, and
        reaching max_degree without a break means no friend qualified.
        '''
        data = data.loc[deg[data['UserId']] != 0]
        reserved_session_ids = []
        print('sessions: {}\tratings: {}'.format(data.SessionId.nunique(), len(data)))
        for sessid in data.SessionId.unique():
            userid, timeid = sessid.split('_')
            userid, timeid = int(userid), int(timeid)
            cn_1 = 0
            for neighbor in adj[userid, : ]:
                if self.visible_time[neighbor] <= timeid and deg[neighbor] > 0:
                    cn_2 = 0
                    for second_neighbor in adj[neighbor, : ]:
                        if self.visible_time[second_neighbor] <= timeid:
                            break
                        cn_2 += 1
                    if cn_2 < self.max_degree:
                        break
                cn_1 += 1
            if cn_1 < self.max_degree:
                reserved_session_ids.append(sessid)
        return reserved_session_ids

    def _padding_sessions(self, data):
        '''
        Pad zeros at the end of each session to length self.max_length for batch training.

        Returns (padded, mask) where padded maps session id ->
        [input ids x, shifted target ids y, full session v] and mask marks
        the valid (non-padded) positions of y.
        '''
        data = data.sort_values(by=['TimeId']).groupby('SessionId')['ItemId'].apply(list).to_dict()
        new_data = {}
        data_mask = {}
        for k, v in data.items():
            mask = np.ones(self.max_length, dtype=np.float32)
            x = v[:-1]
            y = v[1: ]
            assert len(x) > 0
            padded_len = self.max_length - len(x)
            if padded_len > 0:
                x.extend([0] * padded_len)
                y.extend([0] * padded_len)
                mask[-padded_len: ] = 0.
            v.extend([0] * (self.max_length - len(v)))
            # Truncate overly long sessions to the window size.
            x = x[:self.max_length]
            y = y[:self.max_length]
            v = v[:self.max_length]
            new_data[k] = [np.array(x, dtype=np.int32), np.array(y, dtype=np.int32), np.array(v, dtype=np.int32)]
            data_mask[k] = np.array(mask, dtype=bool)
        return new_data, data_mask

    def _batch_feed_dict(self, current_batch):
        '''
        Construct batch inputs: the sessions themselves plus, per layer, the
        sampled support users' most recent sessions and their true lengths.
        '''
        current_batch_sess_ids, samples, support_sizes = current_batch
        feed_dict = {}
        input_x = []
        input_y = []
        mask_y = []
        timeids = []
        for sessid in current_batch_sess_ids:
            nodeid, timeid = sessid.split('_')
            timeids.append(int(timeid))
            x, y, _ = self.padded_data[sessid]
            mask = self.mask[sessid]
            input_x.append(x)
            input_y.append(y)
            mask_y.append(mask)
        feed_dict.update({self.placeholders['input_x']: input_x})
        feed_dict.update({self.placeholders['input_y']: input_y})
        feed_dict.update({self.placeholders['mask_y']: mask_y})
        # samples[2] holds first-layer supports, samples[1] second-layer.
        feed_dict.update({self.placeholders['support_nodes_layer1']: samples[2]})
        feed_dict.update({self.placeholders['support_nodes_layer2']: samples[1]})
        #prepare supportive user's recent sessions.
        support_layers_session = []
        support_layers_length = []
        for layer in range(self.num_layers):
            start = 0
            t = self.num_layers - layer
            support_sessions = []
            support_lengths = []
            for batch in range(self.batch_size):
                timeid = timeids[batch]
                support_nodes = samples[t][start: start + support_sizes[t]]
                for support_node in support_nodes:
                    # Latest session of the support user at this time step.
                    support_session_id = str(self.latest_sessions[support_node][timeid])
                    support_session = self.padded_data[support_session_id][2]
                    length = np.count_nonzero(support_session)
                    support_sessions.append(support_session)
                    support_lengths.append(length)
                start += support_sizes[t]
            support_layers_session.append(support_sessions)
            support_layers_length.append(support_lengths)
        feed_dict.update({self.placeholders['support_sessions_layer1']:support_layers_session[0]})
        feed_dict.update({self.placeholders['support_sessions_layer2']:support_layers_session[1]})
        feed_dict.update({self.placeholders['support_lengths_layer1']:support_layers_length[0]})
        feed_dict.update({self.placeholders['support_lengths_layer2']:support_layers_length[1]})
        return feed_dict

    def sample(self, nodeids, timeids, sampler):
        '''
        Sample neighbors recursively. First-order, then second-order, ...
        Returns (samples, support_sizes); samples[k] is a flat array of
        support_sizes[k] * batch_size node ids.
        '''
        samples = [nodeids]
        support_size = 1
        support_sizes = [support_size]
        first_or_second = ['second', 'first']
        for k in range(self.num_layers):
            t = self.num_layers - k - 1
            node = sampler([samples[k], self.samples_1_2[t], timeids, first_or_second[t], support_size])
            support_size *= self.samples_1_2[t]
            samples.append(np.reshape(node, [support_size * self.batch_size,]))
            support_sizes.append(support_size)
        return samples, support_sizes

    def next_val_minibatch_feed_dict(self, val_or_test='val'):
        '''
        Construct evaluation or test inputs.
        '''
        if val_or_test == 'val':
            start = self.batch_num_val * self.batch_size
            self.batch_num_val += 1
            data = self.valid_session_ids
        elif val_or_test == 'test':
            start = self.batch_num_test * self.batch_size
            self.batch_num_test += 1
            data = self.test_session_ids
        else:
            raise NotImplementedError
        current_batch_sessions = data[start: start + self.batch_size]
        nodes = [int(sessionid.split('_')[0]) for sessionid in current_batch_sessions]
        timeids = [int(sessionid.split('_')[1]) for sessionid in current_batch_sessions]
        # Evaluation always samples from the full (train+valid+test) graph.
        samples, support_sizes = self.sample(nodes, timeids, self.test_sampler)
        return self._batch_feed_dict([current_batch_sessions, samples, support_sizes])

    def next_train_minibatch_feed_dict(self):
        '''
        Generate next training batch data.
        '''
        start = self.batch_num * self.batch_size
        self.batch_num += 1
        current_batch_sessions = self.train_session_ids[start: start + self.batch_size]
        nodes = [int(sessionid.split('_')[0]) for sessionid in current_batch_sessions]
        timeids = [int(sessionid.split('_')[1]) for sessionid in current_batch_sessions]
        samples, support_sizes = self.sample(nodes, timeids, self.sampler)
        return self._batch_feed_dict([current_batch_sessions, samples, support_sizes])

    def _build_adj(self, userids):
        '''
        Build a fixed-width adjacency table and degree vector for the given
        users. Rows default to self.num_nodes (a padding id); users with more
        than max_degree friends are downsampled, fewer are resampled with
        replacement.
        '''
        adj = self.num_nodes*np.ones((self.num_nodes+1, self.max_degree), dtype=np.int32)
        deg = np.zeros((self.num_nodes,))
        missed = 0
        for nodeid in userids:
            neighbors = np.array([neighbor for neighbor in
                                    self.adj_info.loc[self.adj_info['Follower']==nodeid].Followee.unique()], dtype=np.int32)
            deg[nodeid] = len(neighbors)
            if len(neighbors) == 0:
                missed += 1
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[nodeid, :] = neighbors
        return adj, deg

    def construct_adj(self):
        '''
        Construct adj table used during training (train users only).
        '''
        return self._build_adj(self.train_df.UserId.unique())

    def construct_test_adj(self):
        '''
        Construct adj table used during evaluation or testing (all users).
        '''
        return self._build_adj(self.all_data.UserId.unique())

    def end(self):
        '''
        Indicate whether we finish a pass over all training samples.
        '''
        return self.batch_num * self.batch_size > len(self.train_session_ids) - self.batch_size

    def end_val(self, val_or_test='val'):
        '''
        Indicate whether we finish a pass over all testing or evaluation samples.
        Resets only the counter belonging to the finished split (previously
        batch_num_val was also reset when the *test* split finished).
        '''
        batch_num = self.batch_num_val if val_or_test == 'val' else self.batch_num_test
        data = self.valid_session_ids if val_or_test == 'val' else self.test_session_ids
        end = batch_num * self.batch_size > len(data) - self.batch_size
        if end:
            if val_or_test == 'val':
                self.batch_num_val = 0
            elif val_or_test == 'test':
                self.batch_num_test = 0
            else:
                raise NotImplementedError
        return end

    def shuffle(self):
        '''
        Shuffle training data.
        '''
        self.train_session_ids = np.random.permutation(self.train_session_ids)
        self.batch_num = 0
if __name__ == '__main__':
    # Smoke-test driver: load a dataset and build the iterator.
    # placeholders is None here, so only construction is exercised —
    # feed_dict building would require real TF placeholders.
    data = load_data('path/to/data')
    adj_info = data[0]
    latest_per_user_by_time = data[1]
    user_id_map = data[2]
    item_id_map = data[3]
    train_df = data[4]
    valid_df = data[5]
    test_df = data[6]
    minibatch = MinibatchIterator(adj_info,
                latest_per_user_by_time,
                [train_df, valid_df, test_df],
                None, #placeholders,
                batch_size=1,
                max_degree=50,
                num_nodes=len(user_id_map),
                max_length=20,
                samples_1_2=[10, 5])
| 13,192 | 41.15016 | 120 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/layers.py | from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .inits import zeros
# DISCLAIMER:
# This file is forked from
# https://github.com/tkipf/gcn
# which is also under the MIT license
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
    """Helper function, assigns unique layer IDs.

    Returns 1 the first time a layer name is seen, then 2, 3, ... on
    subsequent calls (state kept in the module-level _LAYER_UIDS dict).
    """
    next_uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = next_uid
    return next_uid
class Layer(object):
    """Base layer class defining the shared API for all layer objects.

    Implementation inspired by keras (http://keras.io).

    Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off.

    Subclasses override _call(inputs); __call__ wraps it in a name scope and
    optionally logs input/output histograms. _log_vars() logs all variables.
    """
    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging', 'model_size'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        layer_name = kwargs.get('name')
        if not layer_name:
            # Auto-name: lowercase class name plus a per-class counter.
            base = self.__class__.__name__.lower()
            layer_name = base + '_' + str(get_layer_uid(base))
        self.name = layer_name
        self.vars = {}
        self.logging = kwargs.get('logging', False)
        self.sparse_inputs = False

    def _call(self, inputs):
        # Identity by default; real computation lives in subclasses.
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs

    def _log_vars(self):
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
    """Dense layer: output = act(dropout(x) @ W [+ b]).

    Weights are created under a per-layer variable scope with Xavier
    initialization and an L2 regularizer scaled by `weight_decay`.
    """
    def __init__(self, input_dim, output_dim, dropout=0., weight_decay=0.,
                 act=tf.nn.relu, placeholders=None, bias=True, featureless=False,
                 sparse_inputs=False, **kwargs):
        super(Dense, self).__init__(**kwargs)
        self.dropout = dropout
        self.weight_decay = weight_decay
        self.act = act
        self.featureless = featureless
        self.bias = bias
        self.input_dim = input_dim
        self.output_dim = output_dim
        # helper variable for sparse dropout
        self.sparse_inputs = sparse_inputs
        if sparse_inputs:
            # NOTE(review): only read when sparse_inputs=True; requires
            # placeholders to carry 'num_features_nonzero' — confirm at callers.
            self.num_features_nonzero = placeholders['num_features_nonzero']

        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = tf.get_variable('weights', shape=(input_dim, output_dim),
                                         dtype=tf.float32,
                                         initializer=tf.contrib.layers.xavier_initializer(),
                                         regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        x = inputs

        # Keep probability is 1 - dropout (TF1 dropout semantics).
        x = tf.nn.dropout(x, 1-self.dropout)

        # transform
        output = tf.matmul(x, self.vars['weights'])

        # bias
        if self.bias:
            output += self.vars['bias']

        return self.act(output)
| 3,731 | 31.172414 | 105 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/neigh_samplers.py | from __future__ import division
from __future__ import print_function
import numpy as np
"""
Classes that are used to sample node neighborhoods
"""
class UniformNeighborSampler(object):
    """
    Uniformly samples neighbors.
    Assumes that adj lists are padded with random re-sampling.

    Called with (nodeids, num_samples, timeids, first_or_second, support_size)
    and returns an int32 array of shape [len(nodeids), num_samples].
    """
    def __init__(self, adj_info, visible_time, deg):
        self.adj_info = adj_info
        self.visible_time = visible_time
        self.deg = deg

    def __call__(self, inputs):
        nodeids, num_samples, timeids, first_or_second, support_size = inputs
        sampled_rows = []
        for pos, node in enumerate(nodeids):
            # support_size consecutive entries share one session time step.
            timeid = timeids[pos // support_size]
            candidates = []
            for friend in self.adj_info[node, :]:
                if first_or_second == 'second':
                    # Second layer: friend only needs to be active by now.
                    if self.visible_time[friend] <= timeid:
                        candidates.append(friend)
                elif first_or_second == 'first':
                    # First layer: friend must be active, followed-by someone,
                    # and have at least one second-order friend active by now.
                    if self.visible_time[friend] <= timeid and self.deg[friend] > 0:
                        for friend_of_friend in self.adj_info[friend]:
                            if self.visible_time[friend_of_friend] <= timeid:
                                candidates.append(friend)
                                break
            assert len(candidates) > 0
            if len(candidates) < num_samples:
                candidates = np.random.choice(candidates, num_samples, replace=True)
            elif len(candidates) > num_samples:
                candidates = np.random.choice(candidates, num_samples, replace=False)
            sampled_rows.append(candidates)
        return np.array(sampled_rows, dtype=np.int32)
| 1,740 | 37.688889 | 88 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/__init__.py | from __future__ import print_function
from __future__ import division
| 70 | 22.666667 | 37 | py |
RecommenderSystems | RecommenderSystems-master/socialRec/dgrec/train.py | #coding=utf-8
from __future__ import division
from __future__ import print_function
import os, sys
import argparse
import tensorflow as tf
import numpy as np
import time
from .utils import *
from .minibatch import MinibatchIterator
from .model import DGRec
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
def evaluate(sess, model, minibatch, val_or_test='val'):
    """Run one full pass over the validation or test split.

    Returns (mean batch loss, recall@20, ndcg); the two metrics are summed
    over batches and divided by the total number of valid prediction points.
    """
    losses, recalls, ndcgs, points = [], [], [], []
    while not minibatch.end_val(val_or_test):
        feed_dict = minibatch.next_val_minibatch_feed_dict(val_or_test)
        loss, recall, ndcg, point = sess.run(
            [model.loss, model.sum_recall, model.sum_ndcg, model.point_count],
            feed_dict=feed_dict)
        losses.append(loss)
        recalls.append(recall)
        ndcgs.append(ndcg)
        points.append(point)
    return np.mean(losses), np.sum(recalls) / np.sum(points), np.sum(ndcgs) / np.sum(points)
def construct_placeholders(args):
    """Create every graph input placeholder, keyed by name."""
    n_support1 = args.batch_size * args.samples_1 * args.samples_2
    n_support2 = args.batch_size * args.samples_2
    session_shape = (args.batch_size, args.max_length)
    placeholders = {}
    # Target session inputs/outputs plus the padding mask.
    placeholders['input_x'] = tf.placeholder(tf.int32, shape=session_shape, name='input_session')
    placeholders['input_y'] = tf.placeholder(tf.int32, shape=session_shape, name='output_session')
    placeholders['mask_y'] = tf.placeholder(tf.float32, shape=session_shape, name='mask_x')
    # Sampled support users for both aggregation layers.
    placeholders['support_nodes_layer1'] = tf.placeholder(tf.int32, shape=(n_support1), name='support_nodes_layer1')
    placeholders['support_nodes_layer2'] = tf.placeholder(tf.int32, shape=(n_support2), name='support_nodes_layer2')
    # The support users' most recent sessions and their true lengths.
    placeholders['support_sessions_layer1'] = tf.placeholder(tf.int32, shape=(n_support1,\
                                            args.max_length), name='support_sessions_layer1')
    placeholders['support_sessions_layer2'] = tf.placeholder(tf.int32, shape=(n_support2,\
                                            args.max_length), name='support_sessions_layer2')
    placeholders['support_lengths_layer1'] = tf.placeholder(tf.int32, shape=(n_support1),
                                            name='support_lengths_layer1')
    placeholders['support_lengths_layer2'] = tf.placeholder(tf.int32, shape=(n_support2),
                                            name='support_lengths_layer2')
    return placeholders
def train(args, data):
    """Train DGRec with early stopping on validation Recall@20, then report
    test-set metrics from the final session state.

    data is the 7-tuple produced by load_data: (adj_info, latest sessions per
    user, user id map, item id map, train/valid/test dataframes).
    """
    adj_info = data[0]
    latest_per_user_by_time = data[1]
    user_id_map = data[2]
    item_id_map = data[3]
    train_df = data[4]
    valid_df = data[5]
    test_df = data[6]
    # +1 reserves item id 0 for padding.
    args.num_items = len(item_id_map) + 1
    args.num_users = len(user_id_map)
    placeholders = construct_placeholders(args)
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    ckpt_path = os.path.join(args.ckpt_dir, 'model.ckpt')
    minibatch = MinibatchIterator(adj_info,
                latest_per_user_by_time,
                [train_df, valid_df, test_df],
                placeholders,
                batch_size=args.batch_size,
                max_degree=args.max_degree,
                num_nodes=len(user_id_map),
                max_length=args.max_length,
                samples_1_2=[args.samples_1, args.samples_2])
    dgrec = DGRec(args, minibatch.sizes, placeholders)
    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all upfront.
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
    total_steps = 0
    avg_time = 0.
    # Stop after `patience` consecutive validations without a recall improvement.
    patience = 10
    inc = 0
    early_stopping = False
    highest_val_recall = -1.0
    start_time = time.time()
    for epoch in range(args.epochs):
        minibatch.shuffle()
        iter_cn = 0
        print('Epoch: %04d' % (epoch + 1))
        epoch_val_cost = []
        epoch_val_recall = []
        epoch_val_ndcg = []
        epoch_train_cost = []
        epoch_train_recall = []
        epoch_train_ndcg = []
        epoch_train_point = []
        while not minibatch.end() and not early_stopping:
            t = time.time()
            feed_dict = minibatch.next_train_minibatch_feed_dict()
            outs = sess.run([dgrec.opt_op, dgrec.loss, dgrec.sum_recall, dgrec.sum_ndcg, dgrec.point_count], feed_dict=feed_dict)
            train_cost = outs[1]
            epoch_train_cost.append(train_cost)
            epoch_train_recall.append(outs[2])
            epoch_train_ndcg.append(outs[3])
            epoch_train_point.append(outs[4])
            # Print results
            # Running average of per-step wall time across all epochs.
            avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)
            # Validate every val_every iterations (including iteration 0, so
            # epoch_val_* is non-empty before the first training printout).
            if iter_cn % args.val_every == 0:
                ret = evaluate(sess, dgrec, minibatch)
                epoch_val_cost.append(ret[0])
                epoch_val_recall.append(ret[1])
                epoch_val_ndcg.append(ret[2])
                # Checkpoint only when validation recall improves (or ties).
                if ret[1] >= highest_val_recall:
                    saver.save(sess, ckpt_path, global_step=total_steps)
                    highest_val_recall = ret[1]
                    inc = 0
                    print("Iter:", '%d' % iter_cn,
                        "val_loss=", "{:.5f}".format(epoch_val_cost[-1]),
                        "val_recall@20=", "{:.5f}".format(epoch_val_recall[-1]),
                        "val_ndcg=", "{:.5f}".format(epoch_val_ndcg[-1]),
                        "dump model!"
                        )
                else:
                    inc += 1
                if inc >= patience:
                    early_stopping = True
                    break
            if total_steps % args.print_every == 0:
                print("Iter:", '%d' % iter_cn,
                    "train_loss=", "{:.5f}".format(np.mean(epoch_train_cost)),
                    "train_recall@20=", "{:.5f}".format(np.sum(epoch_train_recall)/np.sum(epoch_train_point)),
                    "train_ndcg=", "{:.5f}".format(np.sum(epoch_train_ndcg)/np.sum(epoch_train_point)),
                    "val_loss=", "{:.5f}".format(epoch_val_cost[-1]),
                    "val_recall@20=", "{:.5f}".format(epoch_val_recall[-1]),
                    "val_ndcg=", "{:.5f}".format(epoch_val_ndcg[-1]),
                    "time=", "{:.5f}s".format(avg_time))
                sys.stdout.flush()
            total_steps += 1
            iter_cn += 1
        if early_stopping:
            print('Early stop at epoch: {}, total training steps: {}'.format(epoch, total_steps))
            break
    end_time = time.time()
    print('-----------{} seconds per batch iteration-------------'.format((end_time - start_time) / total_steps))
    print('Parameter settings: {}'.format(args.ckpt_dir))
    print('Optimization finished!\tStart testing...')
    # NOTE(review): tests the *current* session state, not the best checkpoint.
    ret = evaluate(sess, dgrec, minibatch, 'test')
    print('Test results:',
            '\tLoss:{}'.format(ret[0]),
            '\tRecall@20:{}'.format(ret[1]),
            '\tNDCG:{}'.format(ret[2]))
class Args():
    """Default hyper-parameter bundle; parseArgs() overwrites fields from CLI."""
    # run mode
    training = True
    global_only = False
    local_only = False
    epochs = 20
    # model
    aggregator_type='attn'
    act='relu'
    concat=False
    model_size = 'small'
    # sizes
    batch_size = 200
    max_degree = 50
    num_users = -1
    num_items = 100
    hidden_size = 100
    embedding_size = 50
    emb_user = 50
    max_length=20
    samples_1=10
    samples_2=5
    dim1 = 100
    dim2 = 100
    # optimization
    learning_rate=0.001
    dropout = 0.
    weight_decay = 0.
    # logging
    print_every = 100
    val_every = 500
    ckpt_dir = 'save/'
def parseArgs():
    """Parse command-line flags into an Args object and derive a unique
    checkpoint directory name that encodes the hyper-parameter setting."""
    args = Args()
    parser = argparse.ArgumentParser(description='DGRec args')
    parser.add_argument('--batch', default=200, type=int)
    parser.add_argument('--model', default='attn', type=str)
    parser.add_argument('--act', default='relu', type=str)
    parser.add_argument('--degree', default=50, type=int)
    parser.add_argument('--lr', default=0.002, type=float)
    parser.add_argument('--hidden', default=100, type=int)
    parser.add_argument('--embi', default=50, type=int)
    parser.add_argument('--embu', default=50, type=int)
    parser.add_argument('--samples1', default=10, type=int)
    parser.add_argument('--samples2', default=5, type=int)
    parser.add_argument('--dim1', default=100, type=int)
    parser.add_argument('--dim2', default=100, type=int)
    parser.add_argument('--dropout', default=0., type=float)
    parser.add_argument('--l2', default=0., type=float)
    parser.add_argument('--decay_steps', default=400, type=int)
    parser.add_argument('--decay_rate', default=0.98, type=float)
    parser.add_argument('--local', default=0, type=int)
    parser.add_argument('--glb', default=0, type=int)
    cli = parser.parse_args()
    args.batch_size = cli.batch
    args.aggregator_type = cli.model
    args.act = cli.act
    args.max_degree = cli.degree
    args.learning_rate = cli.lr
    args.hidden_size = cli.hidden
    args.embedding_size = cli.embi
    args.emb_user = cli.embu
    args.samples_1 = cli.samples1
    args.samples_2 = cli.samples2
    args.dim1 = cli.dim1
    args.dim2 = cli.dim2
    args.dropout = cli.dropout
    args.weight_decay = cli.l2
    args.decay_steps = cli.decay_steps
    args.decay_rate = cli.decay_rate
    args.local_only = cli.local
    args.global_only = cli.glb
    # Encode every hyper-parameter into the checkpoint path so different runs
    # never overwrite each other's checkpoints.
    args.ckpt_dir = args.ckpt_dir + ''.join([
        'dgrec_batch{}'.format(args.batch_size),
        '_model{}'.format(args.aggregator_type),
        '_act{}'.format(args.act),
        '_maxdegree{}'.format(args.max_degree),
        '_lr{}'.format(args.learning_rate),
        '_hidden{}'.format(args.hidden_size),
        '_embi{}'.format(args.embedding_size),
        '_embu{}'.format(args.emb_user),
        '_samples1st{}'.format(args.samples_1),
        '_samples2nd{}'.format(args.samples_2),
        '_dim1st{}'.format(args.dim1),
        '_dim2nd{}'.format(args.dim2),
        '_dropout{}'.format(args.dropout),
        '_l2reg{}'.format(args.weight_decay),
        '_decaysteps{}'.format(args.decay_steps),
        '_decayrate{}'.format(args.decay_rate),
        '_global{}'.format(cli.glb),
        '_local{}'.format(cli.local),
    ])
    return args
def main(argv=None):
    """Entry point: parse CLI flags, load the dataset, and launch training."""
    args = parseArgs()
    print('Loading training data..')
    dataset = load_data('data/data/')
    print("Training data loaded!")
    train(args, dataset)
if __name__ == '__main__':
    # tf.app.run parses TF flags and then invokes main().
    tf.app.run()
| 10,966 | 40.541667 | 141 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/markovChains/sampler.py | #coding=utf-8
'''
Author: Weiping Song
Contact: songweiping@pku.edu.cn
Reference: https://github.com/kang205/SASRec/blob/master/sampler.py
'''
# Disclaimer:
# Part of this file is derived from
# https://github.com/kang205/SASRec/
import numpy as np
from multiprocessing import Process, Queue
def random_neg(pos, n, s):
    '''
    Draw `s` distinct negative item ids from [1, n], rejecting anything in
    `pos` (the positive items) or already drawn.

    pos: positive items to exclude
    n: number of items
    s: size of samples
    '''
    sampled = set()
    # Rejection sampling: keep drawing until s acceptable ids collected.
    while len(sampled) < s:
        candidate = np.random.randint(1, n + 1)
        if candidate not in pos and candidate not in sampled:
            sampled.add(candidate)
    return list(sampled)
def sample_function(data, n_items, n_users, batch_size, max_len, neg_size, result_queue, SEED, neg_method='rand'):
    '''
    Worker loop: endlessly builds (user, seq, pos, neg) batches and pushes
    them into result_queue. Runs inside a daemon Process (see Sampler).

    data: dict keyed by the *string* of a 1-based user id, value: that user's
        chronological item-id list.
    n_items / n_users: vocabulary sizes.
    batch_size: number of samples in a batch.
    max_len: history window length (zero-padded on the left).
    neg_size: number of negative samples.
    '''
    # Sampling probability of each user is proportional to how many
    # interactions that user has.
    num_samples = np.array([len(data[str(u)]) for u in range(1, n_users+1)])
    prob_ = num_samples / (1.0 * np.sum(num_samples))
    def sample():
        '''
        # sample a user based on behavior frequency.
        #TODO: more efficient non-uniform sampling method.
        Compute utility lists for non-uniform sampling from discrete distributions.
        Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
        for details
        '''
        # NOTE(review): the alias method referenced above is NOT implemented;
        # np.random.choice with p is O(n_users) per draw.
        user = np.random.choice(a=range(1,1+n_users), p=prob_)
        u = str(user)
        # sample a slice from user u randomly.
        idx = np.random.randint(1, len(data[u]))
        start = 0 if idx >= max_len else max_len - idx
        len_of_item = max_len - start
        # Assume max_len is set to 5, and we want to predict the 4-th entry in the sequence
        # Then the length of historical items is 3.
        # The following code will return the array like [0, 0, x, x, x]
        # i.e. the zero is padded to the left.
        seq = np.zeros([max_len], dtype=np.int32)
        seq[start:] = data[u][idx-len_of_item:idx]
        pos = data[u][idx]
        neg = np.zeros([neg_size], dtype=np.int32)
        if neg_method == 'rand':
            neg = random_neg([pos], n_items, neg_size)
        else:
            raise NotImplementedError
        return (user, seq, pos, neg)

    np.random.seed(SEED)
    # Infinite producer loop: blocks on result_queue.put when the queue is full.
    while True:
        one_batch = []
        for i in range(batch_size):
            one_batch.append(sample())

        result_queue.put(list(zip(*one_batch)))
class Sampler(object):
    """Prefetches training batches in background worker processes.

    n_workers daemon processes run sample_function, each pushing batches of
    (user, seq, pos, neg) tuples into a bounded shared queue; next_batch()
    pops one and close() terminates the workers.
    """
    def __init__(self, data, n_items, n_users, batch_size=128, max_len=20, neg_size=10, n_workers=10, neg_method='rand'):
        self.result_queue = Queue(maxsize=int(2e5))
        self.processors = []
        for _ in range(n_workers):
            # Each worker gets its own random seed so batches differ.
            worker = Process(target=sample_function,
                             args=(data, n_items, n_users, batch_size, max_len,
                                   neg_size, self.result_queue,
                                   np.random.randint(2e9), neg_method))
            worker.daemon = True
            worker.start()
            self.processors.append(worker)

    def next_batch(self):
        return self.result_queue.get()

    def close(self):
        for worker in self.processors:
            worker.terminate()
            worker.join()
RecommenderSystems | RecommenderSystems-master/sequentialRec/markovChains/utils.py | #coding=utf-8
'''
Author: Weiping Song
Contact: Weiping Song
'''
import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
# path of folder that contains all the datas.
data_path = 'data/'
class Dictionary(object):
    """Vocabulary that counts raw ids and assigns 1-based integer indices.

    Indices start at 1 because the id is taken as the list length *after*
    appending; downstream code reserves 0 for padding.
    """
    def __init__(self):
        self.item2idx = {}   # raw id -> 1-based integer index
        self.idx2item = []   # insertion-ordered raw ids
        self.counter = Counter()

    def add_item(self, item):
        self.counter[item] += 1

    def prep_dict(self):
        # Assign indices in first-seen order, skipping already-known ids.
        for item in self.counter:
            if item in self.item2idx:
                continue
            self.idx2item.append(item)
            self.item2idx[item] = len(self.idx2item)

    def __len__(self):
        return len(self.idx2item)
class Corpus(object):
    """Builds a Dictionary over a sequence of raw ids (items or users)."""
    def __init__(self, ItemId):
        self.dict = Dictionary()
        for raw_id in ItemId:
            self.dict.add_item(raw_id)
        self.dict.prep_dict()
def data_generator(args):
    """Load (building and caching on first run) integer-encoded splits.

    Returns [train_data, val_data, test_data, n_items, n_users]; the three
    dicts map a user id (JSON string key) to a chronological item-id list.
    Ids come from Dictionary and are 1-based (0 reserved for padding).
    """
    path_to_data= data_path + args.data + '/'
    # First run: build vocabularies from the raw TSVs, encode all three
    # splits to integer ids, and cache them as JSON + .npy dictionaries.
    if not os.path.exists(path_to_data + args.data + '_train_tr.json'):
        tr_df = pd.read_csv(path_to_data + args.data + '_train_tr.txt', sep='\t')
        val_df = pd.read_csv(path_to_data + args.data + '_train_valid.txt', sep='\t')
        test_df = pd.read_csv(path_to_data + args.data + '_test.txt', sep='\t')
        # Vocabularies are built from the training split only.
        corpus_item = Corpus(tr_df['ItemId'])
        corpus_user = Corpus(tr_df['UserId'])
        np.save(path_to_data + args.data + '_item_dict', np.asarray(corpus_item.dict.idx2item))
        np.save(path_to_data + args.data + '_user_dict', np.asarray(corpus_user.dict.idx2item))
        tr = tr_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        val = val_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        test = test_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        _ = prepare_data(corpus_item, corpus_user, tr, args.data + '_train_tr', path_to_data)
        _ = prepare_data(corpus_item, corpus_user, val, args.data + '_train_valid',path_to_data)
        _ = prepare_data(corpus_item, corpus_user, test, args.data + '_test', path_to_data)
    with open(path_to_data + args.data + '_train_tr.json', 'r') as fp:
        train_data = json.load(fp)
    with open(path_to_data + args.data + '_train_valid.json', 'r') as fp:
        val_data = json.load(fp)
    with open(path_to_data + args.data + '_test.json', 'r') as fp:
        test_data = json.load(fp)
    # Vocabulary sizes come from the cached id arrays.
    item2idx = np.load(path_to_data + args.data + '_item_dict.npy')
    user2idx = np.load(path_to_data + args.data + '_user_dict.npy')
    n_items = item2idx.size
    n_users = user2idx.size
    return [train_data, val_data, test_data, n_items, n_users]
def prepare_data(corpus_item, corpus_user, data, dname, path_to_data):
    """Re-key a {raw user id: [raw item ids]} dict to integer ids, dump it as
    '<dname>.json' next to the raw data, and return the re-keyed dict."""
    encoded = {}
    for raw_user in data.keys():
        user_idx = corpus_user.dict.item2idx[raw_user]
        encoded[user_idx] = [corpus_item.dict.item2idx[raw_item] for raw_item in data[raw_user]]
    with open(path_to_data + dname + '.json', 'w') as fp:
        json.dump(encoded, fp)
    return encoded
def prepare_eval_test(data, batch_size, max_test_len=100):
    """Expand each user's item sequence into (user, left-padded history,
    target) triples and group them into batches of at most `batch_size`.

    For a user with items [i0, i1, ..], position i yields history [i0..i_{i-1}]
    right-aligned in a max_test_len window and target i_i. Sequences are
    capped at max_test_len items.
    """
    if batch_size < 2:
        batch_size = 2
    users, histories, targets = [], [], []
    for uid in data.keys():
        items = data[uid]
        nb_test = min(max_test_len, len(items)) - 1
        users.extend([int(uid)] * nb_test)
        for i in range(1, nb_test + 1):
            history = np.zeros([max_test_len], dtype=np.int32)
            # Left-pad: the first i items occupy the tail of the window.
            history[max_test_len - i:] = items[:i]
            histories.append(history)
            targets.append(items[i])
    num_batches = int(len(users) / batch_size)
    batches = [(users[i * batch_size: (i + 1) * batch_size],
                histories[i * batch_size: (i + 1) * batch_size],
                targets[i * batch_size: (i + 1) * batch_size])
               for i in range(num_batches)]
    # Trailing partial batch, if any.
    if num_batches * batch_size < len(users):
        batches.append((users[num_batches * batch_size:],
                        histories[num_batches * batch_size:],
                        targets[num_batches * batch_size:]))
    return batches
def preprocess_session(dname):
    """Clean a session dataset and split it into train/valid/test TSV files.

    Reads ``<data_path><dname>/<dname>.tsv`` (``data_path`` is a module-level
    global — TODO confirm it is defined before this is called), filters out
    short sessions and rare items, time-splits by session end time, and
    writes ``_train_tr``, ``_train_valid`` and ``_test`` files.
    Only ``dname == 'tmall'`` is supported.
    """
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    if dname == 'tmall':
        data.columns = ['SessionId', 'ItemId', 'Time']
    else:
        raise NotImplementedError
    # Drop sessions with fewer than 3 events.
    session_lengths = data.groupby('SessionId').size()
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    # Drop items occurring fewer than 10 times.
    item_supports = data.groupby('ItemId').size()
    data = data[np.in1d(data.ItemId, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.ItemId.nunique()))
    session_lengths = data.groupby('SessionId').size()
    print('Average session length: {}'.format(session_lengths.mean()))
    # Re-filter sessions: removing rare items may have shortened some sessions.
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    session_lengths = data.groupby('SessionId').size()
    print('Average session length after removing sessions with less than two event: {}'.format(session_lengths.mean()))
    # Time split: sessions ending within the last 2 days go to test.
    session_max_times = data.groupby('SessionId').Time.max()
    tmax = data.Time.max()
    session_train = session_max_times[session_max_times < tmax-86400*2].index
    session_test = session_max_times[session_max_times >= tmax-86400*2].index
    train = data[np.in1d(data.SessionId, session_train)]
    test = data[np.in1d(data.SessionId, session_test)]
    # Keep only test items seen during training, then re-drop short sessions.
    test = test[np.in1d(test.ItemId, train.ItemId)]
    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[tslength>2].index)]
    # Randomly split held-out sessions 50/50 into test and validation.
    test_session = test.SessionId.unique()
    test_session_ = np.random.choice(test_session, int(len(test_session) / 2), replace=False)
    test_ = test.loc[test['SessionId'].isin(test_session_)]
    val_ = test.loc[~test['SessionId'].isin(test_session_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    header = columns = ['SessionId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
def preprocess_sequence(dname):
    """Clean a per-user sequence dataset and split it into train/valid/test.

    Reads ``<data_path><dname>/<dname>.tsv`` (``data_path`` is a module-level
    global — TODO confirm), normalizes timestamps per dataset, filters sparse
    users/items, splits by a 90% time pivot, and writes ``_train_tr``,
    ``_train_valid`` and ``_test`` TSV files with UserId/ItemId/Time columns.
    """
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    # Per-dataset column layout and timestamp parsing.
    if dname == 'delicious':
        data.columns = ['user', 'item', 'TimeStr']
        data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d').timestamp())
        del(data['TimeStr'])
    elif dname == 'googlelocal' or dname == 'elec' or dname == 'game' or dname == 'ml1m' or \
        dname == 'home' or dname == 'beauty' or dname == 'book' or dname == 'app' or dname == 'clothing':
        data.columns = ['user', 'item', 'Time']
    elif dname == 'gowalla':
        data.columns = ['user', 'TimeStr', 'lat', 'long', 'item']
        data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
        del(data['lat'])
        del(data['long'])
        del(data['TimeStr'])
    elif dname == 'brightkite':
        data.columns = ['user', 'item', 'TimeStr']
        data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
        del(data['TimeStr'])
    else:
        raise NotImplementedError
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user: {}'.format(event_lengths.mean()))
    # Drop users with 10 or fewer events, then rare items, then re-drop users.
    data = data[np.in1d(data.user, event_lengths[event_lengths>10].index)]
    item_supports = data.groupby('item').size()
    # 50 for delicious, 10 for gowalla
    data = data[np.in1d(data.item, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.item.nunique()))
    event_lengths = data.groupby('user').size()
    data = data[np.in1d(data.user, event_lengths[event_lengths>=10].index)]
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user after removing sessions with one event: {}'.format(event_lengths.mean()))
    # Time split: first 90% of the time span is train, the rest is held out.
    tmin = data.Time.min()
    tmax = data.Time.max()
    pivot = (tmax-tmin) * 0.9 + tmin
    train = data.loc[data['Time'] < pivot]
    test = data.loc[data['Time'] >= pivot]
    tr_event_lengths = train.groupby('user').size()
    train = train[np.in1d(train.user, tr_event_lengths[tr_event_lengths>3].index)]
    print('Average (train) check-ins per user: {}'.format(tr_event_lengths.mean()))
    # Only evaluate users and items that appear in training.
    user_to_predict = train.user.unique()
    test = test[test['user'].isin(user_to_predict)]
    item_to_predict = train.item.unique()
    test = test[test['item'].isin(item_to_predict)]
    test_event_lengths = test.groupby('user').size()
    test = test[np.in1d(test.user, test_event_lengths[test_event_lengths>3].index)]
    print('Average (test) check-ins per user: {}'.format(test_event_lengths.mean()))
    # Randomly split held-out users 50/50 into test and validation.
    test_user = test.user.unique()
    test_user_ = np.random.choice(test_user, int(len(test_user) / 2), replace=False)
    test_ = test.loc[test['user'].isin(test_user_)]
    val_ = test.loc[~test['user'].isin(test_user_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    header = columns = ['user', 'item', 'Time']
    # Rename the header columns in the written files (data columns unchanged).
    header = ['UserId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
if __name__ == '__main__':
    # Entry point: preprocess the selected sequence datasets.
    # Previously processed datasets are kept here (commented) for reference.
    #preprocess_sequence('delicious')
    #preprocess_sequence('gowalla')
    #preprocess_sequence('game')
    #preprocess_sequence('elec')
    preprocess_sequence('home')
    preprocess_sequence('beauty')
    preprocess_sequence('app')
    preprocess_sequence('clothing')
    preprocess_sequence('book')
| 10,819 | 41.101167 | 130 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/markovChains/model.py | #coding=utf-8
'''
Author: Chence Shi
Contact: chenceshi@pku.edu.cn
'''
import tensorflow as tf
import sys
import os
import numpy as np
def log2(x):
    """Element-wise base-2 logarithm: ln(x) / ln(2)."""
    natural_log = tf.log(x)
    return natural_log / tf.log(tf.constant(2, dtype=natural_log.dtype))
class FOSSIL(object):
    """FOSSIL sequential recommender, TensorFlow 1.x graph-mode.

    Combines a long-term FISM-style item-similarity term with a short-term
    higher-order Markov term, trained with a BPR-style pairwise loss over
    sampled negatives.  Item/user index 0 is reserved for padding.
    """
    def __init__(self, args, n_items, n_users):
        # args must provide: emsize, l2_reg, neg_size, order, alpha, optim.
        self.args = args
        self.n_items = n_items
        self.n_users = n_users
        self._build()
        self.saver = tf.train.Saver()
    def _build(self):
        """Build placeholders, embeddings, loss, train op and eval metrics."""
        self.inp = tf.placeholder(tf.int32, shape=(None, None), name='inp') # if maxlen is 5, valid len of sample i is 3, then inp[i] = [0, 0, x, x, x]
        self.user = tf.placeholder(tf.int32, shape=(None), name='user')
        self.pos = tf.placeholder(tf.int32, shape=(None), name='pos') # positive (target) item per sample
        self.neg = tf.placeholder(tf.int32, shape=(None, self.args.neg_size), name='neg') # sampled negatives
        self.lr = tf.placeholder(tf.float32, shape=None, name='lr')
        # Separate item tables for the history (input) and candidate (output) sides.
        self.item_embedding1 = tf.get_variable('item_embedding1',
                                shape=(self.n_items+1, self.args.emsize),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        self.item_embedding2 = tf.get_variable('item_embedding2',
                                shape=(self.n_items+1, self.args.emsize),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        # Per-user, per-order short-term weights plus a shared per-order bias.
        self.user_bias = tf.get_variable('user_bias',
                                shape=(self.n_users+1, self.args.order),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        self.global_bias = tf.get_variable('global_bias',
                                shape=(self.args.order),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.constant_initializer(0.))
        self.item_bias = tf.get_variable('item_bias',
                                shape=(self.n_items+1),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.constant_initializer(0.))
        mask_inp = tf.expand_dims(tf.to_float(tf.not_equal(self.inp, 0)), -1) #(batch, maxlen, 1); 0 marks padding
        len_inp = tf.reduce_sum(tf.squeeze(mask_inp, axis=2), axis=1) #(batch) valid history length
        item_embed = tf.nn.embedding_lookup(self.item_embedding1, self.inp) * mask_inp #(batch, maxlen, k)
        # Long-term term: sum of history embeddings scaled by |history|^(-alpha).
        long_term = tf.reduce_sum(item_embed, axis=1) #(batch, k)
        long_term = tf.expand_dims(tf.pow(len_inp, -self.args.alpha), -1) * long_term #(batch, k)
        # Short-term term: weighted sum of the last `order` items (reversed
        # slice, newest first), masked to the actual number available.
        effective_order = tf.minimum(len_inp, self.args.order) #(batch)
        effective_order = tf.expand_dims(tf.to_float(tf.sequence_mask(effective_order,self.args.order)), -1) #(batch, order, 1)
        short_term = tf.nn.embedding_lookup(self.user_bias, self.user) #(batch, order)
        short_term = tf.expand_dims(short_term + self.global_bias, axis=-1) #(batch, order, 1)
        short_term = short_term * item_embed[:, :-1-self.args.order:-1] #(batch, order, k)
        short_term = tf.reduce_sum(short_term * effective_order, axis=1) #(batch, k)
        ### for train only
        pos_bias = tf.nn.embedding_lookup(self.item_bias, self.pos) #(batch)
        pos_embed = tf.nn.embedding_lookup(self.item_embedding2, self.pos) #(batch, k)
        neg_bias = tf.nn.embedding_lookup(self.item_bias, self.neg) #(batch, neg_size)
        neg_embed = tf.nn.embedding_lookup(self.item_embedding2, self.neg) #(batch, neg_size, k)
        temp_vec = short_term + long_term #(batch, k)
        pos_score = pos_bias + tf.reduce_sum(temp_vec*pos_embed, axis=1) #(batch)
        neg_score = neg_bias + tf.reduce_sum(tf.expand_dims(temp_vec, axis=1) * neg_embed, axis=2) #(batch, neg_size)
        neg_score = tf.reduce_mean(neg_score, axis=1) #(batch)
        # BPR-style pairwise loss; the sigmoid is clipped to avoid log(0).
        loss = -tf.reduce_mean(tf.log(tf.clip_by_value(tf.sigmoid(pos_score-neg_score), 1e-24, 1-1e-24)))
        ### for prediction only
        full_score = self.item_bias + tf.matmul(temp_vec, self.item_embedding2, transpose_b=True) #(batch, n_items+1)
        self.prediction = full_score
        self.loss = loss
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.loss += sum(reg_losses)
        if self.args.optim == 'adam':
            self.optimizer = tf.train.AdamOptimizer(self.lr)
        elif self.args.optim == 'sgd':
            self.optimizer = tf.train.GradientDescentOptimizer(self.lr)
        else:
            raise NotImplementedError
        self.train_op = self.optimizer.minimize(self.loss)
        self.recall_at_k, self.ndcg_at_k = self._metric_at_k()
    def _metric_at_k(self, k=20):
        """Return graph nodes summing hit@k and ndcg@k over the batch.

        NOTE(review): ranks are computed against prediction[:, 1:] (padding
        column dropped) while in_top_k ranks over the full matrix — looks
        intentional since labels are >= 1, but verify.
        """
        prediction = self.prediction #(batch, n_items+1)
        prediction_transposed = tf.transpose(prediction)
        labels = tf.reshape(self.pos, shape=(-1,))
        # Score of each sample's true item, tiled for comparison against all items.
        pred_values = tf.expand_dims(tf.diag_part(tf.nn.embedding_lookup(prediction_transposed, labels)), -1)
        tile_pred_values = tf.tile(pred_values, [1, self.n_items])
        ranks = tf.reduce_sum(tf.cast(prediction[:,1:] > tile_pred_values, dtype=tf.float32), -1) + 1
        ndcg = 1. / (log2(1.0 + ranks))
        hit_at_k = tf.nn.in_top_k(prediction, labels, k=k)
        hit_at_k = tf.cast(hit_at_k, dtype=tf.float32)
        ndcg_at_k = ndcg * hit_at_k
        return tf.reduce_sum(hit_at_k), tf.reduce_sum(ndcg_at_k)
class FPMC(object):
    """Factorized Personalized Markov Chains, TensorFlow 1.x graph-mode.

    Scores an item as <V_ui, V_iu> + <V_li, V_il>: a user-item factorization
    term plus a first-order Markov term on the most recent item.  Trained
    with a BPR-style pairwise loss over sampled negatives.
    """
    def __init__(self, args, n_items, n_users):
        # args must provide: emsize, l2_reg, neg_size, optim.
        self.args = args
        self.n_items = n_items
        self.n_users = n_users
        self._build()
        self.saver = tf.train.Saver()
    def _build(self):
        """Build placeholders, the four factor matrices, loss and train op."""
        self.inp = tf.placeholder(tf.int32, shape=(None, None), name='inp') # if maxlen is 5, valid len of sample i is 3, then inp[i] = [0, 0, x, x, x]
        self.user = tf.placeholder(tf.int32, shape=(None), name='user')
        self.pos = tf.placeholder(tf.int32, shape=(None), name='pos')
        self.neg = tf.placeholder(tf.int32, shape=(None, self.args.neg_size), name='neg')
        self.lr = tf.placeholder(tf.float32, shape=None, name='lr')
        # Four factor matrices of the FPMC tensor decomposition:
        # VUI/VIU couple user and candidate item; VIL/VLI couple candidate
        # item and the previous item.
        self.VUI = tf.get_variable('user_item',
                                shape=(self.n_users+1, self.args.emsize),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        self.VIU = tf.get_variable('item_user',
                                shape=(self.n_items+1, self.args.emsize),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        self.VIL = tf.get_variable('item_prev',
                                shape=(self.n_items+1, self.args.emsize),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        self.VLI = tf.get_variable('prev_item',
                                shape=(self.n_items+1, self.args.emsize),
                                dtype=tf.float32,
                                regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg),
                                initializer=tf.truncated_normal_initializer(stddev=0.01))
        # First-order Markov: only the most recent item of the (left-padded)
        # history is used.
        self.prev = self.inp[:, -1] #(batch)
        u = tf.nn.embedding_lookup(self.VUI, self.user) #(batch, k)
        prev = tf.nn.embedding_lookup(self.VLI, self.prev) #(batch, k)
        ### for train only
        pos_iu = tf.nn.embedding_lookup(self.VIU, self.pos) #(batch, k)
        pos_il = tf.nn.embedding_lookup(self.VIL, self.pos) #(batch, k)
        pos_score = tf.reduce_sum(u*pos_iu, axis=1) + tf.reduce_sum(prev*pos_il, axis=1) #(batch)
        neg_iu = tf.nn.embedding_lookup(self.VIU, self.neg) #(batch, neg, k)
        neg_il = tf.nn.embedding_lookup(self.VIL, self.neg) #(batch, neg, k)
        neg_score = tf.reduce_sum(tf.expand_dims(u, 1)*neg_iu, axis=2) + tf.reduce_sum(tf.expand_dims(prev, 1)*neg_il, axis=2) #(batch, neg)
        neg_score = tf.reduce_mean(neg_score, axis=1) #(batch)
        # BPR-style pairwise loss; the sigmoid is clipped to avoid log(0).
        loss = -tf.reduce_mean(tf.log(tf.clip_by_value(tf.sigmoid(pos_score-neg_score), 1e-24, 1-1e-24)))
        ### for prediction only
        full_score = tf.matmul(u, self.VIU, transpose_b=True) + tf.matmul(prev, self.VIL, transpose_b=True) #(batch, n_items+1)
        self.prediction = full_score
        self.loss = loss
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.loss += sum(reg_losses)
        if self.args.optim == 'adam':
            self.optimizer = tf.train.AdamOptimizer(self.lr)
        elif self.args.optim == 'sgd':
            self.optimizer = tf.train.GradientDescentOptimizer(self.lr)
        else:
            raise NotImplementedError
        self.train_op = self.optimizer.minimize(self.loss)
        self.recall_at_k, self.ndcg_at_k = self._metric_at_k()
    def _metric_at_k(self, k=20):
        """Return graph nodes summing hit@k and ndcg@k over the batch.

        Same metric construction as FOSSIL._metric_at_k; see the review note
        there about the prediction[:, 1:] / in_top_k asymmetry.
        """
        prediction = self.prediction #(batch, n_items+1)
        prediction_transposed = tf.transpose(prediction)
        labels = tf.reshape(self.pos, shape=(-1,))
        pred_values = tf.expand_dims(tf.diag_part(tf.nn.embedding_lookup(prediction_transposed, labels)), -1)
        tile_pred_values = tf.tile(pred_values, [1, self.n_items])
        ranks = tf.reduce_sum(tf.cast(prediction[:,1:] > tile_pred_values, dtype=tf.float32), -1) + 1
        ndcg = 1. / (log2(1.0 + ranks))
        hit_at_k = tf.nn.in_top_k(prediction, labels, k=k)
        hit_at_k = tf.cast(hit_at_k, dtype=tf.float32)
        ndcg_at_k = ndcg * hit_at_k
        return tf.reduce_sum(hit_at_k), tf.reduce_sum(ndcg_at_k)
| 11,107 | 49.262443 | 151 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/markovChains/__init__.py | 0 | 0 | 0 | py | |
RecommenderSystems | RecommenderSystems-master/sequentialRec/markovChains/train.py | #coding: utf-8
'''
Author: Chence Shi
Contact: chenceshi@pku.edu.cn
'''
import tensorflow as tf
import argparse
import numpy as np
import sys
import time
import math
from .utils import *
from .model import *
from .sampler import *
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='Sequential or session-based recommendation')
parser.add_argument('--model', type=str, default='fossil', help='model: fossil/fpmc. (default: fossil)')
parser.add_argument('--batch_size', type=int, default=128, help='batch size (default: 128)')
parser.add_argument('--seq_len', type=int, default=20, help='max sequence length (default: 20)')
parser.add_argument('--l2_reg', type=float, default=0.0, help='regularization scale (default: 0.0)')
parser.add_argument('--lr', type=float, default=0.01, help='initial learning rate for Adam (default: 0.01)')
parser.add_argument('--lr_decay', type=float, default=0.5, help='learning rate decay rate (default: 0.5)')
parser.add_argument('--emsize', type=int, default=100, help='dimension of item embedding (default: 100)')
parser.add_argument('--neg_size', type=int, default=1, help='size of negative samples (default: 1)')
parser.add_argument('--worker', type=int, default=10, help='number of sampling workers (default: 10)')
parser.add_argument('--seed', type=int, default=1111, help='random seed (default: 1111)')
parser.add_argument('--data', type=str, default='gowalla', help='data set name (default: gowalla)')
parser.add_argument('--log_interval', type=int, default=1e2, help='log interval (default: 1e2)')
parser.add_argument('--eval_interval', type=int, default=1e3, help='eval/test interval (default: 1e3)')
parser.add_argument('--optim', type=str, default='adam', help='optimizer: sgd/adam (default: adam)')
parser.add_argument('--warm_up', type=int, default=0, help='warm up step (default: 0)')
# ****************************** unique arguments for FOSSIL *******************************************************
parser.add_argument('--alpha', type=float, default=0.2, help='alpha (default: 0.2)')
parser.add_argument('--order', type=int, default=1, help='order of Fossil (default: 1)')
# ****************************** unique arguments for FPMC *******************************************************
# None
args = parser.parse_args()
# ---- Data loading and sampler setup ---------------------------------------
tf.set_random_seed(args.seed)
train_data, val_data, test_data, n_items, n_users = data_generator(args)
# Background sampler producing (users, inputs, positives, negatives) batches.
train_sampler = Sampler(
    data=train_data,
    n_items=n_items,
    n_users=n_users,
    batch_size=args.batch_size,
    max_len=args.seq_len,
    neg_size=args.neg_size,
    n_workers=args.worker,
    neg_method='rand')
# Pre-batch validation/test data once; evaluation uses fixed batch size 100.
val_data = prepare_eval_test(val_data, batch_size=100, max_test_len= 20)
test_data = prepare_eval_test(test_data, batch_size=100, max_test_len= 20)
# Checkpoint directory name encodes the main hyper-parameters.
checkpoint_dir = '_'.join(['save', args.data, args.model, str(args.lr), str(args.l2_reg), str(args.emsize)])
print(args)
print ('#Item: ', n_items)
print ('#User: ', n_users)
# ---- Model construction ---------------------------------------------------
model_dict = {'fossil': FOSSIL, 'fpmc': FPMC}
assert args.model in ['fossil', 'fpmc']
model = model_dict[args.model](args, n_items, n_users)
lr = args.lr  # mutable: decayed inside main() on validation plateaus
def evaluate(source, sess):
    """Run the model over every batch in `source`.

    Returns [mean recall@20, mean ndcg@20], averaged over all evaluated
    samples (uses the module-level `model`).
    """
    recall_sum, ndcg_sum, n_samples = 0.0, 0.0, 0.0
    for users, inputs, positives in source:
        fetches = [model.recall_at_k, model.ndcg_at_k]
        feeds = {model.inp: inputs, model.user: users, model.pos: positives}
        batch_recall, batch_ndcg = sess.run(fetches, feed_dict=feeds)
        recall_sum += batch_recall
        ndcg_sum += batch_ndcg
        n_samples += len(users)
    return [recall_sum / n_samples, ndcg_sum / n_samples]
def main():
    """Train until validation recall plateaus, then evaluate the best model.

    Mutates the module-level `lr`: it is halved (floored at 1e-6) whenever
    validation recall does not improve; training stops after three
    consecutive non-improving evaluations.  Only improving checkpoints are
    saved, and the best one is restored for the final test evaluation.
    """
    global lr
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    all_val_recall = [-1]  # sentinel so the first comparison always improves
    early_stop_cn = 0
    step_count = 0
    train_loss_l = 0.
    start_time = time.time()
    print('Start training...')
    try:
        while True:
            # cur_batch = (users, inputs, positives, negatives)
            cur_batch = train_sampler.next_batch()
            inp = np.array(cur_batch[1])
            feed_dict = {model.inp: inp, model.lr: lr}
            feed_dict[model.pos] = np.array(cur_batch[2])
            feed_dict[model.neg] = np.array(cur_batch[3])
            feed_dict[model.user] = np.array(cur_batch[0])
            _, train_loss = sess.run([model.train_op, model.loss], feed_dict=feed_dict)
            train_loss_l += train_loss
            step_count += 1
            if step_count % args.log_interval == 0:
                cur_loss = train_loss_l / args.log_interval
                elapsed = time.time() - start_time
                # NOTE(review): "Totol" typo in the log string — left as-is
                # because it is a runtime string.
                print('| Totol step {:10d} | lr {:02.5f} | ms/batch {:5.2f} | loss {:5.3f}'.format(
                    step_count, lr, elapsed * 1000 / args.log_interval, cur_loss))
                sys.stdout.flush()
                train_loss_l = 0.
                start_time = time.time()
            if step_count % args.eval_interval == 0 and step_count > args.warm_up:
                val_recall, val_ndcg = evaluate(val_data, sess)
                all_val_recall.append(val_recall)
                print('-' * 90)
                print('| End of step {:10d} | valid recall@20 {:8.5f} | valid ndcg@20 {:8.5f}'.format(
                    step_count, val_recall, val_ndcg))
                print('=' * 90)
                sys.stdout.flush()
                if all_val_recall[-1] <= all_val_recall[-2]:
                    # No improvement: decay LR and count toward early stop.
                    lr = lr * args.lr_decay
                    lr = max(lr, 1e-6)
                    early_stop_cn += 1
                else:
                    # Improvement: reset the counter and checkpoint the model.
                    early_stop_cn = 0
                    model.saver.save(sess, checkpoint_dir + '/model.ckpt')
                if early_stop_cn == 3:
                    print('Validation recall decreases in three consecutive epochs. Stop Training!')
                    sys.stdout.flush()
                    break
                start_time = time.time()
    except Exception as e:
        print(str(e))
        train_sampler.close()
        exit(1)
    train_sampler.close()
    print('Done')
    # Restore the best (last saved) checkpoint before the final test pass.
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        model.saver.restore(sess, '{}/{}'.format(checkpoint_dir, 'model.ckpt'))
        print('Restore model successfully')
    else:
        print('Restore model failed!!!!!')
    test_recall, test_ndcg = evaluate(test_data, sess)
    print('-' * 90)
    print('test recall@20 {:8.5f} | test ndcg@20 {:8.5f}'.format(
        test_recall, test_ndcg))
    print('=' * 90)
if __name__ == '__main__':
    # Create the checkpoint directory on first run, then train and evaluate.
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    main()
| 6,793 | 38.5 | 116 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/base.py | # coding: utf-8
'''
Author: Weiping Song, Chence Shi, Zheye Deng
Contact: songweiping@pku.edu.cn, chenceshi@pku.edu.cn, dzy97@pku.edu.cn
'''
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
class LSTMNet(object):
    """Stacked LSTM sequence encoder (TF1) used as a backbone network.

    Callable: maps a batch of embedded sequences to per-step hidden states.
    """
    def __init__(self, layers=1, hidden_units=100, hidden_activation="tanh", dropout=0.2):
        # Only "tanh" and "relu" cell activations are supported.
        self.layers = layers
        self.hidden_units = hidden_units
        if hidden_activation == "tanh":
            self.hidden_activation = tf.nn.tanh
        elif hidden_activation == "relu":
            self.hidden_activation = tf.nn.relu
        else:
            raise NotImplementedError
        self.dropout = dropout
    def __call__(self, inputs, mask):
        '''
        inputs: the embeddings of a batch of sequences. (batch_size, seq_length, emb_size)
        mask: mask for incomplete sequences, non-zero on valid steps. (batch_size, seq_length, 1)
        Returns per-step outputs of the top layer. (batch_size, seq_length, hidden_units)
        '''
        cells = []
        for _ in range(self.layers):
            cell = rnn.BasicLSTMCell(self.hidden_units, activation=self.hidden_activation)
            # Output dropout on every layer.
            cell = rnn.DropoutWrapper(cell, output_keep_prob=1.-self.dropout)
            cells.append(cell)
        self.cell = cell = rnn.MultiRNNCell(cells)
        zero_state = cell.zero_state(tf.shape(inputs)[0], dtype=tf.float32)
        # Per-sample valid length derived from the mask, so dynamic_rnn
        # stops updating state past the end of each sequence.
        sequence_length = tf.count_nonzero(tf.squeeze(mask, [-1]), -1)
        outputs, state = tf.nn.dynamic_rnn(cell, inputs, sequence_length=sequence_length, initial_state=zero_state)
        return outputs
class TemporalConvNet(object):
    """Temporal Convolutional Network (TCN) sequence encoder (TF1).

    A stack of residual blocks of causal dilated 1-D convolutions; the
    dilation doubles at every level, so the receptive field grows
    exponentially with depth.
    """
    def __init__(self, num_channels, stride=1, kernel_size=2, dropout=0.2):
        # num_channels: list of output channels, one entry per level.
        self.kernel_size=kernel_size
        self.stride = stride
        self.num_levels = len(num_channels)
        self.num_channels = num_channels
        self.dropout = dropout
    def __call__(self, inputs, mask):
        # inputs: (batch, seq_len, emb_size).  Returns (batch, seq_len,
        # num_channels[-1]).
        # NOTE(review): `mask` is accepted but never used here — padding
        # positions flow through the convolutions; confirm callers rely on
        # left-padding + causal convs instead.
        inputs_shape = inputs.get_shape().as_list()
        outputs = [inputs]
        for i in range(self.num_levels):
            dilation_size = 2 ** i
            in_channels = inputs_shape[-1] if i == 0 else self.num_channels[i-1]
            out_channels = self.num_channels[i]
            output = self._TemporalBlock(outputs[-1], in_channels, out_channels, self.kernel_size,
                    self.stride, dilation=dilation_size, padding=(self.kernel_size-1)*dilation_size,
                    dropout=self.dropout, level=i)
            outputs.append(output)
        return outputs[-1]
    def _TemporalBlock(self, value, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2, level=0):
        """One residual block: two causal dilated convs + ReLU/dropout,
        with a 1x1 conv on the skip path when channel counts differ."""
        # Left-pad only, so the convolution is causal (no future leakage).
        padded_value1 = tf.pad(value, [[0,0], [padding,0], [0,0]])
        self.conv1 = tf.layers.conv1d(inputs=padded_value1,
                        filters=n_outputs,
                        kernel_size=kernel_size,
                        strides=stride,
                        padding='valid',
                        dilation_rate=dilation,
                        activation=None,
                        kernel_initializer=tf.random_normal_initializer(0, 0.01),
                        bias_initializer=tf.zeros_initializer(),
                        name='layer'+str(level)+'_conv1')
        self.output1 = tf.nn.dropout(tf.nn.relu(self.conv1), keep_prob=1-dropout)
        padded_value2 = tf.pad(self.output1, [[0,0], [padding,0], [0,0]])
        self.conv2 = tf.layers.conv1d(inputs=padded_value2,
                        filters=n_outputs,
                        kernel_size=kernel_size,
                        strides=stride,
                        padding='valid',
                        dilation_rate=dilation,
                        activation=None,
                        kernel_initializer=tf.random_normal_initializer(0, 0.01),
                        bias_initializer=tf.zeros_initializer(),
                        name='layer'+str(level)+'_conv2')
        self.output2 = tf.nn.dropout(tf.nn.relu(self.conv2), keep_prob=1-dropout)
        # Residual/skip connection; project with a 1x1 conv if channels differ.
        if n_inputs != n_outputs:
            res_x = tf.layers.conv1d(inputs=value,
                        filters=n_outputs,
                        kernel_size=1,
                        activation=None,
                        kernel_initializer=tf.random_normal_initializer(0, 0.01),
                        bias_initializer=tf.zeros_initializer(),
                        name='layer'+str(level)+'_conv')
        else:
            res_x = value
        return tf.nn.relu(res_x + self.output2)
def normalize(inputs,
              epsilon = 1e-8,
              scope="ln",
              reuse=None):
    '''Applies layer normalization over the last dimension.

    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension has
        `batch_size`.
      epsilon: A floating number. A very small number for preventing ZeroDivision Error.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        inputs_shape = inputs.get_shape()
        params_shape = inputs_shape[-1:]
        # Mean/variance over the feature (last) axis only.
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable shift (beta) and scale (gamma), one per feature.
        beta= tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) )
        outputs = gamma * normalized + beta
    return outputs
def multihead_attention(queries,
                        keys,
                        num_units=None,
                        num_heads=8,
                        dropout_keep_prob=1.0,
                        causality=False,
                        scope="multihead_attention",
                        reuse=None,
                        with_qk=False):
    '''Applies multihead attention (self-attention when queries == keys).

    Args:
      queries: A 3d tensor with shape of [N, T_q, C_q].
      keys: A 3d tensor with shape of [N, T_k, C_k].
      num_units: A scalar. Attention size; defaults to the last dim of queries.
      dropout_keep_prob: keep probability applied to the attention weights.
      causality: Boolean. If true, units that reference the future are masked.
      num_heads: An int. Number of heads.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
      with_qk: if True, return the (Q, K) projections instead of the output.

    Returns
      A 3d tensor with shape of (N, T_q, C), with a residual connection to
      `queries` already added (no output layer-norm here).
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Set the fall back option for num_units
        if num_units is None:
            num_units = queries.get_shape().as_list[-1]

        # Linear projections (no activation, as in the original Transformer).
        # Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu) # (N, T_q, C)
        # K = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
        # V = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
        Q = tf.layers.dense(queries, num_units, activation=None) # (N, T_q, C)
        K = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
        V = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)

        # Split channels into heads and fold heads into the batch dimension.
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)

        # Scaled dot-product attention logits.
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)

        # Key masking: all-zero key vectors (padding) get -inf logits so
        # softmax assigns them ~zero weight.
        key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
        key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
        key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)
        paddings = tf.ones_like(outputs)*(-2**32+1)
        outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs) # (h*N, T_q, T_k)

        # Causality = future blinding: lower-triangular mask keeps only
        # positions <= the query position.
        if causality:
            diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
            try:
                tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (T_q, T_k)
            except:
                tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense() # (T_q, T_k)
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
            paddings = tf.ones_like(masks)*(-2**32+1)
            outputs = tf.where(tf.equal(masks, 0), paddings, outputs) # (h*N, T_q, T_k)

        # Attention weights.
        outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)

        # Query masking: zero out rows belonging to all-zero (padding) queries.
        query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
        query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
        query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
        outputs *= query_masks # broadcasting. (N, T_q, C)

        # Attention dropout.
        outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_prob)

        # Weighted sum of values.
        outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h)

        # Un-fold heads back into the channel dimension.
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2 ) # (N, T_q, C)

        # Residual connection
        outputs += queries

        # Normalization is applied by the caller (pre-norm architecture).
        #outputs = normalize(outputs) # (N, T_q, C)

    if with_qk:
        return Q, K
    else:
        return outputs
def feedforward(inputs,
                num_units=[2048, 512],
                scope="multihead_attention",
                dropout_keep_prob=1.0,
                reuse=None):
    '''Point-wise feed forward net (two 1x1 convolutions with a residual).

    Args:
      inputs: A 3d tensor with shape of [N, T, C].
      num_units: A list of two integers: inner and output widths; the output
        width must equal C for the residual addition.
      scope: Optional scope for `variable_scope`.
      dropout_keep_prob: keep probability applied after each conv.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A 3d tensor with the same shape and dtype as inputs
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Inner layer: 1x1 conv == per-position dense, with ReLU.
        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_prob)
        #outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
        # Readout layer: linear projection back to num_units[1] channels.
        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_prob)
        #outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
        # Residual connection
        outputs += inputs
        # Normalization is applied by the caller (pre-norm architecture).
        #outputs = normalize(outputs)
    return outputs
class TransformerNet(object):
    """SASRec-style causal Transformer encoder (TF1), pre-norm variant.

    Callable: adds positional information to the input embeddings, applies
    `num_blocks` blocks of causal self-attention + point-wise feed-forward,
    and layer-normalizes the final output.
    """
    def __init__(self, num_units, num_blocks, num_heads, maxlen, dropout_rate, pos_fixed, l2_reg=0.0):
        self.num_units = num_units
        self.num_blocks = num_blocks
        self.num_heads = num_heads
        self.maxlen = maxlen
        self.dropout_keep_prob = 1. - dropout_rate
        # Lazily-built sinusoidal table (see get_position_encoding).
        self.position_encoding_matrix = None
        # pos_fixed=True: fixed sin/cos encodings; False: learned embeddings.
        self.pos_fixed = pos_fixed
        self.l2_reg = l2_reg
        #self.position_encoding = position_encoding(self.maxlen, self.num_units) # (maxlen, num_units)
    def position_embedding(self, inputs, maxlen, num_units, l2_reg=0.0, scope="pos_embedding", reuse=None):
        """Learned positional embeddings looked up by position index."""
        with tf.variable_scope(scope, reuse=reuse):
            pos_embedding_table = tf.get_variable('pos_embedding_table', dtype=tf.float32, shape=[maxlen, num_units], regularizer=tf.contrib.layers.l2_regularizer(l2_reg))
            outputs = tf.nn.embedding_lookup(pos_embedding_table, inputs)
        return outputs
    def get_position_encoding(self, inputs, scope="pos_embedding/", reuse=None, dtype=tf.float32):
        """Fixed sinusoidal positional encodings, built once and cached."""
        with tf.variable_scope(scope, reuse=reuse):
            if self.position_encoding_matrix is None:
                # Interleaved sin (even dims) / cos (odd dims) over a
                # geometric frequency progression.
                encoded_vec = np.array([pos/np.power(10000, 2*i/self.num_units) for pos in range(self.maxlen) for i in range(self.num_units)])
                encoded_vec[::2] = np.sin(encoded_vec[::2])
                encoded_vec[1::2] = np.cos(encoded_vec[1::2])
                encoded_vec = tf.convert_to_tensor(encoded_vec.reshape([self.maxlen, self.num_units]), dtype=dtype)
                self.position_encoding_matrix = encoded_vec # (maxlen, num_units)
            N = tf.shape(inputs)[0]
            T = tf.shape(inputs)[1]
            position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1]) # (batch_size , len)
            position_encoding = tf.nn.embedding_lookup(self.position_encoding_matrix, position_ind) # (batch_size, len, num_units)
        return position_encoding
    def __call__(self, inputs, mask):
        '''
        Args:
            inputs: sequence embeddings (item_embeddings + pos_embeddings) shape: (batch_size , maxlen, embedding_size)
            mask: non-zero on valid steps, shape (batch_size, maxlen, 1)
        Return:
            Output sequences which has the same shape with inputs
        '''
        if self.pos_fixed: # use sin /cos positional embedding
            position_encoding = self.get_position_encoding(inputs) # (batch_size, len, num_units)
        else:
            position_encoding = self.position_embedding(tf.tile(tf.expand_dims(tf.range(tf.shape(inputs)[1]), 0), [tf.shape(inputs)[0], 1]), self.maxlen, self.num_units, self.l2_reg)
        inputs += position_encoding
        # Zero out padding positions before and after every block.
        inputs *= mask
        for i in range(self.num_blocks):
            with tf.variable_scope("num_blocks_%d" % i):
                # Self-attention (pre-norm: normalize queries, raw keys).
                inputs = multihead_attention(queries=normalize(inputs),
                                             keys=inputs,
                                             num_units=self.num_units,
                                             num_heads=self.num_heads,
                                             dropout_keep_prob=self.dropout_keep_prob,
                                             causality=True,
                                             scope="self_attention")
                # Feed forward
                inputs = feedforward(normalize(inputs), num_units=[self.num_units, self.num_units],
                                     dropout_keep_prob=self.dropout_keep_prob)
                inputs *= mask
        outputs = normalize(inputs) # (batch_size, maxlen, num_units)
        return outputs
| 15,214 | 43.75 | 182 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/test.py | #coding: utf-8
'''
Author: Weiping Song
Contact: songweiping@pku.edu.cn
'''
import tensorflow as tf
import argparse
import numpy as np
import sys
import time
import math
from .utils import *
from .model import *
from .eval import Evaluation
parser = argparse.ArgumentParser(description='Sequential or session-based recommendation')
parser.add_argument('--model', type=str, default='tcn', help='sequential model: rnn/tcn/transformer. (default: tcn)')
parser.add_argument('--batch_size', type=int, default=128, help='batch size (default: 128)')
parser.add_argument('--seq_len', type=int, default=20, help='max sequence length (default: 20)')
parser.add_argument('--dropout', type=float, default=0.2, help='dropout (default: 0.2)')
parser.add_argument('--l2_reg', type=float, default=0.0, help='regularization scale (default: 0.0)')
parser.add_argument('--clip', type=float, default=1., help='gradient clip (default: 1.)')
parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit (default: 20)')
parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for Adam (default: 0.001)')
parser.add_argument('--emsize', type=int, default=100, help='dimension of item embedding (default: 100)')
parser.add_argument('--neg_size', type=int, default=1, help='size of negative samples (default: 1)')
parser.add_argument('--worker', type=int, default=10, help='number of sampling workers (default: 10)')
parser.add_argument('--nhid', type=int, default=100, help='number of hidden units (default: 100)')
parser.add_argument('--levels', type=int, default=3, help='# of levels (default: 3)')
parser.add_argument('--seed', type=int, default=1111, help='random seed (default: 1111)')
parser.add_argument('--loss', type=str, default='ns', help='type of loss: ns/sampled_sm/full_sm (default: ns)')
parser.add_argument('--data', type=str, default='gowalla', help='data set name (default: gowalla)')
parser.add_argument('--log_interval', type=int, default=1e2, help='log interval (default: 1e2)')
parser.add_argument('--eval_interval', type=int, default=1e3, help='eval/test interval (default: 1e3)')
# ****************************** unique arguments for rnn model. *******************************************************
# None
# ***************************** unique arguemnts for tcn model.
parser.add_argument('--ksize', type=int, default=3, help='kernel size (default: 100)')
# ****************************** unique arguments for transformer model. *************************************************
parser.add_argument('--num_blocks', type=int, default=3, help='num_blocks')
parser.add_argument('--num_heads', type=int, default=2, help='num_heads')
parser.add_argument('--pos_fixed', type=int, default=0, help='trainable positional embedding usually has better performance')
args = parser.parse_args()
tf.set_random_seed(args.seed)
train_data, val_data, test_data, n_items, n_users = data_generator(args)
max_test_len = 20
test_data_per_step = prepare_eval_test(test_data, batch_size=100, max_test_len=max_test_len)
checkpoint_dir = '_'.join(['save', args.data, args.model, str(args.lr), str(args.l2_reg), str(args.emsize), str(args.dropout)])
print(args)
print ('#Item: ', n_items)
print ('#User: ', n_users)
model = NeuralSeqRecommender(args, n_items, n_users)
lr = args.lr
def evaluate_subsequent(source, sess):
    """Evaluate treating every later item of a user's sequence as relevant.

    For each user, the first ``l-1`` items are fed to the model; for step
    ``i`` the model's scores are used to rank all items, and all remaining
    items ``itemids[i:l]`` count as targets. Metrics are accumulated in an
    ``Evaluation`` object and printed by ``EV.result()``.

    Args:
        source: dict mapping user-id strings to lists of item ids.
        sess: active tf.Session with the restored model graph.
    """
    EV = Evaluation()
    for u in source.keys():
        itemids = source[u]
        l = min(len(itemids), max_test_len)
        if l < 2:
            # Need at least one input item and one target item.
            continue
        feed_dict = {model.inp: [itemids[:l-1]], model.dropout: 0}
        prediction = sess.run(model.prediction, feed_dict=feed_dict)
        prediction = prediction.flatten()
        for i in range(1, l):
            # Scores over all n_items+1 items at step i-1 (index 0 is padding).
            i_pred = prediction[(i-1)*(n_items+1): i*(n_items+1)]
            # BUG FIX: original referenced undefined name `i_mi` (NameError).
            # Rank real items (drop padding index 0) by descending score;
            # +1 restores 1-based item ids.
            rank = np.argsort(-i_pred[1:]) + 1
            EV.eval(int(u), itemids[i:l], rank[:20])
    EV.result()
def evaluate(source, sess):
    """Average hit@20 and ndcg@20 over pre-batched evaluation data.

    Args:
        source: list of (user_ids, inputs, targets) batches produced by
            prepare_eval_test.
        sess: active tf.Session holding the model.
    Returns:
        [hit_rate, ndcg] averaged over all non-padding target positions.
    """
    hit_sum = 0.0
    ndcg_sum = 0.0
    n_targets = 0.0
    for _, batch_inp, batch_pos in source:
        fd = {model.inp: batch_inp, model.dropout: 0.}
        fd[model.pos] = batch_pos
        hit, ndcg, n_target = sess.run(
            [model.hit_at_k, model.ndcg_at_k, model.num_target], feed_dict=fd)
        hit_sum += hit
        ndcg_sum += ndcg
        n_targets += n_target
    return [hit_sum / n_targets, ndcg_sum / n_targets]
def main():
    """Restore the saved checkpoint and print step-wise test metrics."""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True # do not grab all GPU memory upfront
    sess = tf.Session(config=config)
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # NOTE(review): restores the fixed file name 'model.ckpt' rather than
        # ckpt.model_checkpoint_path — confirm this matches how train.py saves.
        model.saver.restore(sess, '{}/{}'.format(checkpoint_dir, 'model.ckpt'))
        print('Restore model successfully')
    else:
        print('Restore model failed!!!!!')
    test_hit, test_ndcg = evaluate(test_data_per_step, sess)
    print('Step-wise test :\nRecall@20 {:8.5f} | Ndcg@20 {:8.5f}'.format(test_hit, test_ndcg))
    #print('Subsequent as targets:\n')
    #evaluate_subsequent(test_data, sess)
if __name__ == '__main__':
    # Evaluation requires an existing checkpoint directory; bail out otherwise.
    if not os.path.exists(checkpoint_dir):
        print('Checkpoint directory not found!')
        exit(0)
    main()
| 5,225 | 41.836066 | 127 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/sampler.py | #coding=utf-8
'''
Author: Weiping Song
Contact: songweiping@pku.edu.cn
Reference: https://github.com/kang205/SASRec/blob/master/sampler.py
'''
# Disclaimer:
# Part of this file is derived from
# https://github.com/kang205/SASRec/
import numpy as np
from multiprocessing import Process, Queue
def random_neg(pos, n, s):
    '''
    Draw s distinct negative item ids uniformly from [1, n].

    pos: collection of positive item ids to exclude
    n: total number of items
    s: number of negatives to sample (must satisfy s <= n - len(pos))
    '''
    sampled = set()
    while len(sampled) < s:
        candidate = np.random.randint(1, n + 1)
        # Reject collisions with positives or already-drawn negatives.
        if candidate not in pos and candidate not in sampled:
            sampled.add(candidate)
    return list(sampled)
def sample_function(data, n_items, n_users, batch_size, max_len, neg_size, result_queue, SEED, neg_method='rand'):
    '''
    Worker loop: endlessly generate training batches and push them to a queue.

    data: dict keyed by user-id string, value: list of the user's item ids.
    n_items: number of distinct items (ids are 1-based; 0 is padding).
    n_users: number of distinct users.
    batch_size: number of samples in a batch.
    max_len: maximum sequence length (shorter sequences are zero-padded).
    neg_size: number of negative samples per target position.
    result_queue: multiprocessing.Queue receiving batches of
        (users, seqs, pos, neg) tuples, already transposed column-wise.
    SEED: per-worker numpy RNG seed.
    neg_method: only 'rand' (uniform negatives) is supported.
    '''
    # Users are sampled proportionally to how many interactions they have.
    num_samples = np.array([len(data[str(u)]) for u in range(1, n_users+1)])
    prob_ = num_samples / (1.0 * np.sum(num_samples))
    def sample():
        # sample a user based on behavior frequency.
        user = np.random.choice(a=range(1,1+n_users), p=prob_)
        u = str(user)
        # sample a slice from user u randomly.
        if len(data[u]) <= max_len:
            idx = 0
        else:
            idx = np.random.randint(0, len(data[u])-max_len+1)
        seq = np.zeros([max_len], dtype=np.int32)
        for i, itemid in enumerate(data[u][idx:idx+max_len]):
            seq[i] = itemid
        pos = np.zeros([max_len], dtype=np.int32)
        neg = np.zeros([max_len, neg_size], dtype=np.int32)
        # Targets are the inputs shifted left by one; l is the number of
        # valid target positions in this slice.
        l = len(data[u]) - idx - 1
        l = min(l, max_len)
        for j in range(l):
            pos[j] = data[u][idx+1+j]
            if neg_method == 'rand':
                # Negatives only need to avoid the positive at this position.
                neg[j,:] = random_neg([pos[j]], n_items, neg_size)
            else: # Currently we only support random negative samples.
                raise NotImplementedError
        return (user, seq, pos, neg)
    np.random.seed(SEED)
    while True:
        one_batch = []
        for i in range(batch_size):
            one_batch.append(sample())
        # zip(*...) turns a list of per-sample tuples into per-field lists.
        result_queue.put(list(zip(*one_batch)))
class Sampler(object):
    """Asynchronous mini-batch producer backed by worker processes.

    Spawns ``n_workers`` daemon processes each running ``sample_function``
    forever, pushing batches into a shared bounded queue; ``next_batch``
    pops one batch, ``close`` terminates the workers.
    """
    def __init__(self, data, n_items, n_users, batch_size=128, max_len=20, neg_size=10, n_workers=10, neg_method='rand'):
        # Bounded queue provides back-pressure if training falls behind.
        self.result_queue = Queue(maxsize=int(2e5))
        self.processors = []
        for i in range(n_workers):
            self.processors.append(
                Process(target=sample_function, args=(data,
                                                      n_items,
                                                      n_users,
                                                      batch_size,
                                                      max_len,
                                                      neg_size,
                                                      self.result_queue,
                                                      np.random.randint(2e9), # distinct RNG seed per worker
                                                      neg_method)))
            self.processors[-1].daemon = True # workers die with the parent
            self.processors[-1].start()
    def next_batch(self):
        """Block until a (users, seqs, pos, neg) batch is available."""
        return self.result_queue.get()
    def close(self):
        """Forcefully terminate and reap all worker processes."""
        for p in self.processors:
            p.terminate()
            p.join()
| 3,473 | 34.814433 | 121 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/utils.py | #coding=utf-8
'''
Author: Weiping Song
Contact: Weiping Song
'''
import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
data_path = 'data/'
class Dictionary(object):
    """Vocabulary that maps raw ids to 1-based integer indices.

    Usage: call ``add_item`` once per occurrence, then ``prep_dict`` to
    freeze the vocabulary. Indices are assigned in first-seen order
    starting from 1, leaving 0 free for padding.
    """
    def __init__(self):
        self.item2idx = {}        # raw id -> 1-based index
        self.idx2item = []        # position k holds the raw id with index k+1
        self.counter = Counter()  # occurrence counts per raw id
    def add_item(self, item):
        """Record one occurrence of *item*."""
        self.counter[item] += 1
    def prep_dict(self):
        """Assign a 1-based index to every distinct item seen so far."""
        for item in self.counter:
            if item in self.item2idx:
                continue
            self.idx2item.append(item)
            self.item2idx[item] = len(self.idx2item)
    def __len__(self):
        return len(self.idx2item)
class Corpus(object):
    """Build a frozen Dictionary from one pass over a column of raw ids."""
    def __init__(self, ItemId):
        self.dict = Dictionary()
        for raw_id in ItemId:
            self.dict.add_item(raw_id)
        self.dict.prep_dict()
def data_generator(args):
    """Load (building on first use) re-indexed train/valid/test splits.

    On first call the raw tab-separated txt splits are read, user/item ids
    are mapped to contiguous 1-based integers via Corpus, the mappings are
    saved as .npy files and the re-indexed splits as .json; subsequent
    calls just load the cached json/npy artifacts.

    Returns:
        [train_data, val_data, test_data, n_items, n_users] where each
        *_data is a dict of user-id (as string key from json) -> item list.
    """
    path_to_data= data_path + args.data + '/'
    if not os.path.exists(path_to_data + args.data + '_train_tr.json'):
        tr_df = pd.read_csv(path_to_data + args.data + '_train_tr.txt', sep='\t')
        val_df = pd.read_csv(path_to_data + args.data + '_train_valid.txt', sep='\t')
        test_df = pd.read_csv(path_to_data + args.data + '_test.txt', sep='\t')
        # Vocabularies are built from the training split only.
        corpus_item = Corpus(tr_df['ItemId'])
        corpus_user = Corpus(tr_df['UserId'])
        np.save(path_to_data + args.data + '_item_dict', np.asarray(corpus_item.dict.idx2item))
        np.save(path_to_data + args.data + '_user_dict', np.asarray(corpus_user.dict.idx2item))
        # Per-user chronological item lists.
        tr = tr_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        val = val_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        test = test_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        _ = prepare_data(corpus_item, corpus_user, tr, args.data + '_train_tr', path_to_data)
        _ = prepare_data(corpus_item, corpus_user, val, args.data + '_train_valid',path_to_data)
        _ = prepare_data(corpus_item, corpus_user, test, args.data + '_test', path_to_data)
    with open(path_to_data + args.data + '_train_tr.json', 'r') as fp:
        train_data = json.load(fp)
    with open(path_to_data + args.data + '_train_valid.json', 'r') as fp:
        val_data = json.load(fp)
    with open(path_to_data + args.data + '_test.json', 'r') as fp:
        test_data = json.load(fp)
    item2idx = np.load(path_to_data + args.data + '_item_dict.npy')
    user2idx = np.load(path_to_data + args.data + '_user_dict.npy')
    n_items = item2idx.size
    n_users = user2idx.size
    return [train_data, val_data, test_data, n_items, n_users]
def prepare_data(corpus_item, corpus_user, data, dname, path_to_data):
    """Re-index one split's raw ids to integers and dump it as <dname>.json.

    Args:
        corpus_item, corpus_user: Corpus objects holding the id mappings.
        data: dict of raw user id -> list of raw item ids.
        dname: base name for the output json file.
        path_to_data: directory to write into.
    Returns:
        The re-indexed dict (int user id -> list of int item ids).

    NOTE(review): users/items not present in the training vocabulary would
    raise KeyError here — presumably splits are pre-filtered upstream.
    """
    ret = {}
    user_str_ids = data.keys()
    for u in user_str_ids:
        u_int_id = corpus_user.dict.item2idx[u]
        i_int_ids = []
        item_str_ids = data[u]
        for i in item_str_ids:
            i_int_ids.append(corpus_item.dict.item2idx[i])
        ret[u_int_id] = i_int_ids
    with open(path_to_data + dname + '.json', 'w') as fp:
        json.dump(ret, fp)
    return ret
def prepare_eval_test(data, batch_size, max_test_len=100):
    """Turn a user->item-sequence dict into padded evaluation batches.

    Each sequence is truncated to ``max_test_len`` and zero-padded; the
    target sequence is the input shifted left by one position.

    Args:
        data: dict of user-id string -> list of item ids.
        batch_size: users per batch (forced to at least 2).
        max_test_len: fixed padded length per user.
    Returns:
        List of (user_ids, inputs, targets) tuples; the final batch may
        hold fewer than ``batch_size`` users.
    """
    batch_size = max(batch_size, 2)
    users, inputs, targets = [], [], []
    for uid, itemids in data.items():
        users.append(int(uid))
        inp = np.zeros([max_test_len], dtype=np.int32)
        tgt = np.zeros([max_test_len], dtype=np.int32)
        l = min(max_test_len, len(itemids))
        inp[:l] = itemids[:l]
        tgt[:l-1] = itemids[1:l]  # next-item targets, shifted by one
        inputs.append(inp)
        targets.append(tgt)
    return [
        (users[i:i+batch_size], inputs[i:i+batch_size], targets[i:i+batch_size])
        for i in range(0, len(users), batch_size)
    ]
def preprocess_session(dname):
    '''
    Build train/valid/test splits for session-based recommendation, where a
    session is treated as a user's history.
    The raw data should contain three columns, i.e., SessionId, ItemId and Time
    with Tab as separator (no header). Outputs three tab-separated txt files
    with header UserId/ItemId/Time (SessionId is renamed to UserId so the
    downstream pipeline can treat sessions as users).
    '''
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    data.columns = ['SessionId', 'ItemId', 'Time']
    # Drop sessions with fewer than 3 events and items with <10 occurrences.
    session_lengths = data.groupby('SessionId').size()
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    item_supports = data.groupby('ItemId').size()
    data = data[np.in1d(data.ItemId, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.ItemId.nunique()))
    session_lengths = data.groupby('SessionId').size()
    print('Average session length: {}'.format(session_lengths.mean()))
    # Re-filter sessions that became too short after item filtering.
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    session_lengths = data.groupby('SessionId').size()
    print('Average session length after removing sessions with less than two event: {}'.format(session_lengths.mean()))
    session_max_times = data.groupby('SessionId').Time.max()
    tmax = data.Time.max()
    session_train = session_max_times[session_max_times < tmax-86400*2].index # We preserve sessions of last two days as validation and test data
    session_test = session_max_times[session_max_times >= tmax-86400*2].index
    train = data[np.in1d(data.SessionId, session_train)]
    test = data[np.in1d(data.SessionId, session_test)]
    # Keep only test items seen in training, then re-check session length.
    test = test[np.in1d(test.ItemId, train.ItemId)]
    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[tslength>2].index)]
    # Random half of the held-out sessions become test, the rest validation.
    test_session = test.SessionId.unique()
    test_session_ = np.random.choice(test_session, int(len(test_session) / 2), replace=False)
    test_ = test.loc[test['SessionId'].isin(test_session_)]
    val_ = test.loc[~test['SessionId'].isin(test_session_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    columns = ['SessionId', 'ItemId', 'Time']
    header = ['UserId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
def preprocess_sequence(dname):
    '''
    Build train/valid/test splits for sequential recommendation.
    The raw data should contain three columns, i.e., user, item and Time with
    Tab as separator (no header). Outputs three tab-separated txt files with
    header UserId/ItemId/Time.
    '''
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    data.columns = ['user', 'item', 'Time']
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user: {}'.format(event_lengths.mean()))
    # Drop users with <=10 events and rare items.
    data = data[np.in1d(data.user, event_lengths[event_lengths>10].index)]
    item_supports = data.groupby('item').size()
    # 50 for delicious, 10 for gowalla
    data = data[np.in1d(data.item, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.item.nunique()))
    event_lengths = data.groupby('user').size()
    # Re-filter users whose histories shrank below 10 after item filtering.
    data = data[np.in1d(data.user, event_lengths[event_lengths>=10].index)]
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user after removing sessions with one event: {}'.format(event_lengths.mean()))
    tmin = data.Time.min()
    tmax = data.Time.max()
    pivot = (tmax-tmin) * 0.9 + tmin # Preserve last 10% as validation and test data
    train = data.loc[data['Time'] < pivot]
    test = data.loc[data['Time'] >= pivot]
    tr_event_lengths = train.groupby('user').size()
    train = train[np.in1d(train.user, tr_event_lengths[tr_event_lengths>3].index)]
    print('Average (train) check-ins per user: {}'.format(tr_event_lengths.mean()))
    # Only evaluate users and items that appear in training.
    user_to_predict = train.user.unique()
    test = test[test['user'].isin(user_to_predict)]
    item_to_predict = train.item.unique()
    test = test[test['item'].isin(item_to_predict)]
    test_event_lengths = test.groupby('user').size()
    test = test[np.in1d(test.user, test_event_lengths[test_event_lengths>3].index)]
    print('Average (test) check-ins per user: {}'.format(test_event_lengths.mean()))
    # Random half of the held-out users become test, the rest validation.
    test_user = test.user.unique()
    test_user_ = np.random.choice(test_user, int(len(test_user) / 2), replace=False)
    test_ = test.loc[test['user'].isin(test_user_)]
    val_ = test.loc[~test['user'].isin(test_user_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    columns = ['user', 'item', 'Time']
    header = ['UserId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
if __name__ == '__main__':
    # Build train/valid/test splits for the Gowalla check-in dataset.
    preprocess_sequence('gowalla')
| 9,599 | 40.921397 | 145 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/model.py | #coding=utf-8
'''
Author: Weiping Song
Contact: songweiping@pku.edu.cn
'''
import tensorflow as tf
import sys
from .base import LSTMNet
from .base import TemporalConvNet
from .base import TransformerNet
def log2(x):
    """Elementwise base-2 logarithm of a tensor, via the change-of-base rule."""
    log_x = tf.log(x)
    log_two = tf.log(tf.constant(2, dtype=log_x.dtype))
    return log_x / log_two
class NeuralSeqRecommender(object):
    """Next-item recommender wrapping a TCN / LSTM / Transformer backbone.

    Builds the full TF 1.x graph at construction time: placeholders,
    item-embedding table, backbone network, one of three losses
    ('ns' negative sampling, 'sampled_sm' sampled softmax-style BCE,
    'full_sm' full softmax), the Adam training op with gradient clipping,
    and hit@20 / ndcg@20 evaluation tensors.
    """
    def __init__(self, args, n_items, n_users):
        self.args = args
        self.n_items = n_items
        self.n_users = n_users
        self._build()
        self.saver = tf.train.Saver()
    def _build(self):
        """Construct placeholders, network, losses, train op and metrics."""
        # (batch, seq_len) input item ids; 0 is padding.
        self.inp = tf.placeholder(tf.int32, shape=(None, None), name='inp')
        # (batch, seq_len) next-item targets; 0 marks non-target positions.
        self.pos = tf.placeholder(tf.int32, shape=(None, None), name='pos')
        # (batch, seq_len, neg_size) negative item ids per target position.
        self.neg = tf.placeholder(tf.int32, shape=(None, None, self.args.neg_size), name='neg')
        self.lr = tf.placeholder(tf.float32, shape=None, name='lr')
        self.dropout = tf.placeholder_with_default(0., shape=())
        # Row 0 of the embedding table corresponds to the padding id.
        self.item_embedding = item_embedding = tf.get_variable('item_embedding', \
                                            shape=(self.n_items + 1, self.args.emsize), \
                                            dtype=tf.float32, \
                                            regularizer=tf.contrib.layers.l2_regularizer(self.args.l2_reg), \
                                            initializer=tf.contrib.layers.xavier_initializer())
        input_item = tf.nn.embedding_lookup(item_embedding, self.inp)
        # 1.0 where the input is a real item, 0.0 at padding.
        mask = tf.expand_dims(tf.to_float(tf.not_equal(self.inp, 0)), -1)
        if self.args.model == 'tcn':
            num_channels = [self.args.nhid] * (self.args.levels -1 ) + [self.args.emsize]
            self.net = TemporalConvNet(num_channels, stride=1, kernel_size=self.args.ksize, dropout=self.dropout)
        elif self.args.model == 'rnn':
            self.net = LSTMNet(layers=self.args.levels, hidden_units=self.args.nhid, dropout=self.dropout)
        elif self.args.model == 'transformer':
            self.net = TransformerNet(self.args.emsize, self.args.levels, self.args.num_heads, self.args.seq_len, dropout_rate=self.dropout, pos_fixed=self.args.pos_fixed)
        else:
            raise NotImplementedError
        outputs = self.net(input_item, mask)
        outputs *= mask
        # Flatten per-step context vectors to (batch*seq_len, emsize).
        ct_vec = tf.reshape(outputs, (-1, self.args.emsize))
        outputs_shape = tf.shape(outputs)
        self.total_loss = 0.
        # 1.0 at positions that have a real (non-padding) target.
        self.istarget = istarget = tf.reshape(tf.to_float(tf.not_equal(self.pos, 0)), [-1])
        _pos_emb = tf.nn.embedding_lookup(self.item_embedding, self.pos)
        pos_emb = tf.reshape(_pos_emb, (-1, self.args.emsize))
        _neg_emb = tf.nn.embedding_lookup(self.item_embedding, self.neg)
        neg_emb = tf.reshape(_neg_emb, (-1, self.args.neg_size, self.args.emsize))
        # Repeat each context vector once per negative sample.
        temp_vec_neg = tf.tile(tf.expand_dims(ct_vec, [1]), [1, self.args.neg_size, 1])
        if self.args.loss == 'ns':
            # Binary negative-sampling loss (requires exactly one negative).
            assert self.args.neg_size == 1
            pos_logit = tf.reduce_sum(ct_vec * pos_emb, -1)
            neg_logit = tf.squeeze(tf.reduce_sum(temp_vec_neg * neg_emb, -1), 1)
            loss = tf.reduce_sum(
                    -tf.log(tf.sigmoid(pos_logit) + 1e-24) * istarget - \
                    tf.log(1 - tf.sigmoid(neg_logit) + 1e-24) * istarget \
                    ) / tf.reduce_sum(istarget)
        elif self.args.loss == 'sampled_sm':
            # Softmax over {positive} ∪ negatives, trained with per-class BCE.
            pos_logit = tf.reduce_sum(ct_vec * pos_emb, -1, keepdims=True)
            neg_logit = tf.reduce_sum(temp_vec_neg * neg_emb, -1)
            label_1 = tf.ones_like(pos_logit, dtype=tf.float32)
            label_0 = tf.zeros_like(neg_logit, dtype=tf.float32)
            labels = tf.concat([label_1, label_0], -1)
            logit = tf.concat([pos_logit, neg_logit], -1)
            softmax_logit = tf.nn.softmax(logit)
            loss = tf.reduce_sum( \
                    tf.reduce_sum( \
                    - labels * tf.log(softmax_logit + 1e-24) - \
                    (1. - labels) * tf.log(1. - softmax_logit + 1e-24), -1) * istarget \
                    ) / tf.reduce_sum(istarget)
        elif self.args.loss == 'full_sm':
            # Full softmax over the entire item vocabulary.
            full_logits = tf.matmul(ct_vec, self.item_embedding, transpose_b=True)
            loss = tf.reduce_sum( \
                    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(self.pos, [-1]), \
                    logits=full_logits) * istarget \
                    ) / tf.reduce_sum(istarget)
        # Full-vocabulary scores used for prediction and metrics, regardless
        # of which training loss was selected.
        full_logits = tf.matmul(ct_vec, self.item_embedding, transpose_b=True)
        self.prediction = full_logits
        self.loss = loss
        self.total_loss += loss
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.total_loss += sum(reg_losses)
        optimizer = tf.train.AdamOptimizer(self.lr)
        gvs = optimizer.compute_gradients(self.total_loss)
        # Clip each gradient elementwise to [-clip, clip].
        capped_gvs = [(tf.clip_by_value(grad, -self.args.clip, self.args.clip), var) for grad, var in gvs]
        self.train_op = optimizer.apply_gradients(capped_gvs)
        self.hit_at_k, self.ndcg_at_k, self.num_target = self._metric_at_k()
    def _metric_at_k(self, k=20):
        """Build summed hit@k, ndcg@k and target-count tensors.

        Returns:
            (sum of hit@k, sum of ndcg@k, number of real targets); callers
            divide the first two by the third to get averages.
        """
        prediction = self.prediction
        prediction_transposed = tf.transpose(prediction)
        labels = tf.reshape(self.pos, shape=(-1,))
        # Score of the true item for each row.
        pred_values = tf.expand_dims(tf.diag_part(tf.nn.embedding_lookup(prediction_transposed, labels)), -1)
        tile_pred_values = tf.tile(pred_values, [1, self.n_items])
        # Rank of the true item among real items (column 0 / padding excluded).
        ranks = tf.reduce_sum(tf.cast(prediction[:,1:] > tile_pred_values, dtype=tf.float32), -1) + 1
        istarget = tf.reshape(self.istarget, shape=(-1,))
        ndcg = 1. / (log2(1.0 + ranks))
        hit_at_k = tf.nn.in_top_k(prediction, labels, k=k) # also known as Recall@k
        hit_at_k = tf.cast(hit_at_k, dtype=tf.float32)
        istarget = tf.reshape(self.istarget, shape=(-1,))
        hit_at_k *= istarget
        # NDCG only credited when the true item is inside the top k.
        ndcg_at_k = ndcg * istarget * hit_at_k
        return (tf.reduce_sum(hit_at_k), tf.reduce_sum(ndcg_at_k), tf.reduce_sum(istarget))
| 6,096 | 45.9 | 171 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/eval.py | import numpy as np
class Evaluation:
    '''
    Accumulates ranking metrics over a stream of (target, prediction) pairs:
    Precision@K and Recall@K for each cutoff in `ks`, average precision
    (MAP) and NDCG (with rank cutoff `ndcg_cutoff`). Call eval() once per
    example, then result() once to print averages.

    In progress...
    Eventually, we aim to include popular evaluation metrics as many as possible.
    '''
    def __init__(self, ks = [1, 5, 10, 20], ndcg_cutoff = 20):
        self.k = ks
        self.ndcg_cutoff = ndcg_cutoff
        self.clear()
    def clear(self):
        """Reset all accumulated statistics."""
        self.P = np.zeros(len(self.k), dtype=np.float32)
        self.R = np.zeros(len(self.k), dtype=np.float32)
        self.MAP = []
        self.NDCG = []
    def eval(self, user_id, target, prediction):
        '''
        :param user_id: int (currently unused; kept for interface stability)
        :param target: list of int ground-truth items
        :param prediction: list of int, ranked predictions (best first)
        :return:
        '''
        ranking = {}   # item -> 1-based rank (later duplicates overwrite)
        hits_so_far = 0.
        ap = 0.
        prec = np.zeros(len(self.k), dtype=np.float32)
        for idx, item in enumerate(prediction):
            ranking[item] = idx + 1
            if item in target:
                for i, k in enumerate(self.k):
                    if idx < k:
                        prec[i] += 1.0 # predicted item is in top-k (Precision@K)
                # Count each distinct relevant item once (first occurrence).
                if item not in prediction[:idx]:
                    hits_so_far += 1.0
                    ap += hits_so_far / (idx + 1.0)
        for i, k in enumerate(self.k):
            prec[i] /= float(k) # Precision@K is normalized by K
        self.P = self.P + prec
        self.MAP.append(ap / float(len(prediction)))
        rec = np.zeros(len(self.k), dtype=np.float32)
        dcg = 0
        for item in target:
            for i, k in enumerate(self.k):
                if item in prediction[:k]:
                    rec[i] += 1 # target appears in top-k prediction (Recall@K)
            if ranking.get(item, 1e9) <= self.ndcg_cutoff:
                dcg += 1.0 / np.log2(1.0 + ranking[item])
        self.NDCG.append(dcg / float(len(target)))
        # Recall@K is normalized by the number of targets.
        self.R = self.R + rec / float(len(target))
    def result(self):
        """Print averaged metrics. Divides P/R in place, so call only once."""
        num_data = len(self.MAP)
        self.P = self.P / float(num_data)
        self.R = self.R / float(num_data)
        print("==========================================")
        print("NDCG@%d = %8.4f" % (self.ndcg_cutoff, np.mean(self.NDCG)))
        print("MAP = %8.4f" % np.mean(self.MAP))
        for i, k in enumerate(self.k):
            print("Precise @%2d = %6.4f" % (k, self.P[i]))
        for i, k in enumerate(self.k):
            print("Recall @%2d = %6.4f" % (k, self.R[i]))
        print("==========================================")
| 2,624 | 32.653846 | 89 | py |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/__init__.py | 0 | 0 | 0 | py | |
RecommenderSystems | RecommenderSystems-master/sequentialRec/neural/train.py | #coding: utf-8
'''
Author: Weiping Song
Contact: songweiping@pku.edu.cn
'''
import tensorflow as tf
import argparse
import numpy as np
import sys
import time
import math
from .utils import *
from .model import *
from .sampler import *
parser = argparse.ArgumentParser(description='Sequential or session-based recommendation')
parser.add_argument('--model', type=str, default='tcn', help='sequential model: rnn/tcn/transformer. (default: tcn)')
parser.add_argument('--batch_size', type=int, default=128, help='batch size (default: 128)')
parser.add_argument('--seq_len', type=int, default=20, help='max sequence length (default: 20)')
parser.add_argument('--dropout', type=float, default=0.2, help='dropout (default: 0.2)')
parser.add_argument('--l2_reg', type=float, default=0.0, help='regularization scale (default: 0.0)')
parser.add_argument('--clip', type=float, default=1., help='gradient clip (default: 1.)')
parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit (default: 20)')
parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for Adam (default: 0.001)')
parser.add_argument('--emsize', type=int, default=100, help='dimension of item embedding (default: 100)')
parser.add_argument('--neg_size', type=int, default=1, help='size of negative samples (default: 10)')
parser.add_argument('--worker', type=int, default=10, help='number of sampling workers (default: 10)')
parser.add_argument('--nhid', type=int, default=100, help='number of hidden units (default: 100)')
parser.add_argument('--levels', type=int, default=3, help='# of levels (default: 3)')
parser.add_argument('--seed', type=int, default=1111, help='random seed (default: 1111)')
parser.add_argument('--loss', type=str, default='ns', help='type of loss: ns/sampled_sm/full_sm (default: ns)')
parser.add_argument('--data', type=str, default='gowalla', help='data set name (default: gowalla)')
parser.add_argument('--log_interval', type=int, default=1e2, help='log interval (default: 1e2)')
parser.add_argument('--eval_interval', type=int, default=1e3, help='eval/test interval (default: 1e3)')
# ****************************** unique arguments for rnn model. *******************************************************
# None
# ***************************** unique arguemnts for tcn model.
parser.add_argument('--ksize', type=int, default=3, help='kernel size (default: 100)')
# ****************************** unique arguments for transformer model. *************************************************
parser.add_argument('--num_blocks', type=int, default=3, help='num_blocks')
parser.add_argument('--num_heads', type=int, default=2, help='num_heads')
parser.add_argument('--pos_fixed', type=int, default=0, help='trainable positional embedding usually has better performance')
args = parser.parse_args()
tf.set_random_seed(args.seed)
train_data, val_data, test_data, n_items, n_users = data_generator(args)
train_sampler = Sampler(
data=train_data,
n_items=n_items,
n_users=n_users,
batch_size=args.batch_size,
max_len=args.seq_len,
neg_size=args.neg_size,
n_workers=args.worker,
neg_method='rand')
val_data = prepare_eval_test(val_data, batch_size=100, max_test_len= 20)
checkpoint_dir = '_'.join(['save', args.data, args.model, str(args.lr), str(args.l2_reg), str(args.emsize), str(args.dropout)])
print(args)
print ('#Item: ', n_items)
print ('#User: ', n_users)
model = NeuralSeqRecommender(args, n_items, n_users)
lr = args.lr
def evaluate(source, sess):
    """Average hit@20 and ndcg@20 over pre-batched validation data.

    NOTE(review): duplicated verbatim in test.py — consider moving to a
    shared module.

    Args:
        source: list of (user_ids, inputs, targets) batches from
            prepare_eval_test.
        sess: active tf.Session holding the model.
    Returns:
        [hit_rate, ndcg] averaged over all non-padding target positions.
    """
    total_hit_k = 0.0
    total_ndcg_k = 0.0
    count = 0.0
    for batch in source:
        feed_dict = {model.inp: batch[1], model.dropout: 0.}
        feed_dict[model.pos] = batch[2]
        hit, ndcg, n_target = sess.run([model.hit_at_k, model.ndcg_at_k, model.num_target], feed_dict=feed_dict)
        count += n_target
        total_hit_k += hit
        total_ndcg_k += ndcg
    val_hit = total_hit_k / count
    val_ndcg = total_ndcg_k / count
    return [val_hit, val_ndcg]
def main():
    """Train until validation hit@20 declines three evaluations in a row.

    Pulls batches from the asynchronous sampler forever; every
    args.eval_interval steps it evaluates, halves the learning rate on a
    decline, and checkpoints only on improvement.
    """
    global lr
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True # do not grab all GPU memory upfront
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    all_val_hit = [-1] # sentinel so the first comparison always "improves"
    early_stop_cn = 0
    step_count = 0
    train_loss_l = 0.
    start_time = time.time()
    print('Start training...')
    try:
        while True:
            cur_batch = train_sampler.next_batch()
            inp = np.array(cur_batch[1])
            feed_dict = {model.inp: inp, model.lr: lr, model.dropout: args.dropout}
            feed_dict[model.pos] = np.array(cur_batch[2])
            feed_dict[model.neg] = np.array(cur_batch[3])
            _, train_loss = sess.run([model.train_op, model.loss], feed_dict=feed_dict)
            train_loss_l += train_loss
            step_count += 1
            if step_count % args.log_interval == 0:
                cur_loss = train_loss_l / args.log_interval
                elapsed = time.time() - start_time
                # NOTE(review): 'Totol' typo lives in a runtime log string.
                print('| Totol step {:10d} | lr {:02.5f} | ms/batch {:5.2f} | loss {:5.3f}'.format(
                    step_count, lr, elapsed * 1000 / args.log_interval, cur_loss))
                sys.stdout.flush()
                train_loss_l = 0.
                start_time = time.time()
            if step_count % args.eval_interval == 0:
                val_hit, val_ndcg = evaluate(val_data, sess)
                all_val_hit.append(val_hit)
                print('-' * 90)
                print('| End of step {:10d} | valid hit@20 {:8.5f} | valid ndcg@20 {:8.5f}'.format(
                    step_count, val_hit, val_ndcg))
                print('=' * 90)
                sys.stdout.flush()
                if all_val_hit[-1] <= all_val_hit[-2]:
                    # No improvement: halve the lr (floored) and count it.
                    lr /= 2.
                    lr = max(lr, 1e-6)
                    early_stop_cn += 1
                else:
                    # Improvement: reset the counter and checkpoint.
                    early_stop_cn = 0
                    model.saver.save(sess, checkpoint_dir + '/model.ckpt')
                if early_stop_cn == 3:
                    print('Validation hit decreases in three consecutive epochs. Stop Training!')
                    sys.stdout.flush()
                    break
                start_time = time.time()
    except Exception as e:
        # Make sure sampler worker processes are reaped before exiting.
        print(str(e))
        train_sampler.close()
        exit(1)
    train_sampler.close()
    print('Done')
if __name__ == '__main__':
    # Create the run-specific checkpoint directory if missing, then train.
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    main()
| 6,636 | 41.006329 | 127 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/test.py | import argparse
import os
import random
import shutil
import time
import warnings
import sys
import cv2
import numpy as np
import scipy.misc
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
#import torchvision.models as models
from datasets import get_dataset
from models import get_classification_model
from sr_models.model import RDN, Vgg19
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data-root-pos', type=str, default='./data',
help='path to dataset')
parser.add_argument('--data-root-neg', type=str, default='./data',
help='path to dataset')
parser.add_argument('--dataset', type=str, default='cityscapes',
help='dataset name (default: pascal12)')
parser.add_argument('-a', '--arch', type=str, default='resnet50',
help='model architecture')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=1, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--input-channel', default=3, type=int,
help='number of input channel')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--save-every-epoch', type=int, default=10,
help='how many epochs to save a model.')
parser.add_argument('--output-path', default='./output_models', type=str, metavar='PATH',
help='path to output models')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--dataset_type', type=str, default='image',
help='which dataset to load.')
parser.add_argument('--carlibration', default=1.0, type=float,
help='carlibration factor for posterior')
parser.add_argument('--defense', default=1.0, type=float,
help='defense factor')
parser.add_argument('--save_path', type=str, default='./score.npy', help='save models')
parser.add_argument('--no_dilation', action='store_true', help='do not use dilated convolutions in attackers')
parser.add_argument('--sr-num-features', type=int, default=64)
parser.add_argument('--sr-growth-rate', type=int, default=64)
parser.add_argument('--sr-num-blocks', type=int, default=16)
parser.add_argument('--sr-num-layers', type=int, default=8)
parser.add_argument('--sr-scale', type=int, default=4)
parser.add_argument('--sr-weights-file', type=str, required=True)
parser.add_argument('--idx-stages', type=int, default=0)
best_acc1 = 0
def main():
    """Parse CLI args and dispatch to main_worker, either directly or via
    one spawned process per GPU in multiprocessing-distributed mode."""
    args = parser.parse_args()
    if args.seed is not None:
        # Seeding enables cuDNN determinism at a (possibly large) speed cost.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        # World size supplied by the launcher environment (e.g. torchrun).
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for evaluation.

    Builds the binary classifier, loads the frozen super-resolution model
    and the VGG perception network, runs `test` over the dataset, and
    saves the collected score array to ``args.save_path``.
    """
    global best_acc1  # kept for parity with the training script; unused here
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
    else:
        print("=> creating model '{}'".format(args.arch))
    # Two-class (real vs. fake) classifier over reconstruction residuals.
    model = get_classification_model(arch=args.arch, pretrained = args.pretrained,
                                     input_channel=args.input_channel, num_classes=2, dilated=(not args.no_dilation))
    #import ipdb; ipdb.set_trace()
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:%d'%(args.gpu))
            model.load_state_dict(checkpoint['state_dict'],strict=False)
            print("=> loaded checkpoint '{}'"
                  .format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code (no flipping at evaluation time).
    test_dataset = get_dataset(name=args.dataset_type, root_pos=args.data_root_pos, root_neg=args.data_root_neg, flip=False)
    if args.distributed:
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    else:
        test_sampler = None
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=test_sampler)
    # Frozen super-resolution network used to form reconstruction residuals.
    sr_model = RDN(scale_factor=args.sr_scale,
                   num_channels=3,
                   num_features=args.sr_num_features,
                   growth_rate=args.sr_growth_rate,
                   num_blocks=args.sr_num_blocks,
                   num_layers=args.sr_num_layers,
                   requires_grad=False).cuda(args.gpu)#.to(device)
    checkpoint = torch.load(args.sr_weights_file, map_location='cuda:%d'%(args.gpu))
    if 'state_dict' in checkpoint.keys():
        sr_model.load_state_dict(checkpoint['state_dict'])
    else:
        sr_model.load_state_dict(checkpoint)
    # VGG19 feature extractor for the perceptual-residual stages.
    perception_net = Vgg19().cuda(args.gpu)
    Precision, Recall, Score = test(test_loader, model, sr_model, perception_net, args)
    np.save(args.save_path, Score)
def test(test_loader, model, sr_model, perception_net, args):
TP = 0
FP = 0
FN = 0
ACC = 0
# switch to eval mode
model.eval()
sr_model.eval()
# get the softmax weight
model_params = list(model.parameters())
weight_softmax = np.squeeze(model_params[-2].cpu().detach().numpy())
score = []
for i, (input, target, post_path) in enumerate(test_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
lr = 0
for ii in range(args.sr_scale):
for jj in range(args.sr_scale):
lr = lr + input[:, :, ii::args.sr_scale, jj::args.sr_scale] / (args.sr_scale * args.sr_scale)
lr = lr / 255.0
input = input / 255.0
preds_input = sr_model(lr)
if args.idx_stages > 0:
per_rec = perception_net(preds_input)
per_gt = perception_net(input)
rec_features = abs( per_rec[args.idx_stages - 1] - per_gt[args.idx_stages - 1] )
output, layer4 = model( rec_features )
else:
rec_features0 = abs( preds_input - input )
output, layer4 = model( rec_features0 )
pred = (output[:,0] < output[:,1]).cpu().numpy()
target = target.cpu().numpy()
output = output.cpu().detach().numpy()
score.append(output[0])
TP += sum((target==pred)*(1==pred))
FP += sum((target!=pred)*(1==pred))
FN += sum((target!=pred)*(0==pred))
ACC += sum(target==pred)
print('%08d : Precision=%.4f , Recall = %.4f, Acc = %.4f' % (i+1, 1.0*TP/(TP+FP), 1.0*TP/(TP+FN), 1.0*ACC/(i+1) ))
return 1.0*TP/(TP+FP), 1.0*TP/(TP+FN), np.array(score)
class AverageMeter(object):
    """Running-average tracker: remembers the most recent value and the
    mean of everything seen since the last reset."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` with multiplicity `n` and refresh the mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
# Script entry point.
if __name__ == '__main__':
    main()
| 13,359 | 39.731707 | 124 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/train_sr.py | import argparse
import os
import copy
import torch
from torch import nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from sr_models.model import RDN, VGGLoss
from sr_models.datasets import TrainDataset, EvalDataset
from sr_models.utils import AverageMeter, calc_psnr, convert_rgb_to_y, denormalize
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Dataset files and output directory.
    parser.add_argument('--train-file', type=str, required=True)
    parser.add_argument('--eval-file', type=str, required=True)
    parser.add_argument('--outputs-dir', type=str, required=True)
    parser.add_argument('--weights-file', type=str)
    # RDN architecture hyper-parameters.
    parser.add_argument('--num-features', type=int, default=64)
    parser.add_argument('--growth-rate', type=int, default=64)
    parser.add_argument('--num-blocks', type=int, default=16)
    parser.add_argument('--num-layers', type=int, default=8)
    parser.add_argument('--scale', type=int, default=4)
    parser.add_argument('--patch-size', type=int, default=32)
    # Optimization hyper-parameters.
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--lr-decay', type=float, default=0.5)
    parser.add_argument('--lr-decay-epoch', type=int, default=200)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--num-epochs', type=int, default=800)
    parser.add_argument('--num-save', type=int, default=100)
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--gpu-id',type=int, default=0)
    parser.add_argument('--seed', type=int, default=123)
    parser.add_argument('--vgg-lambda', type=float, default=0.2)
    parser.add_argument('--augment', action='store_true', help='whether applying jpeg and gaussian noising augmentation in training a sr model')
    parser.add_argument('--completion', action='store_true', help='completion')
    parser.add_argument('--colorization', action='store_true', help='colorization')
    args = parser.parse_args()
    # Models for each scale go to their own sub-directory, e.g. outputs/x4.
    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))
    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)
    cudnn.benchmark = True
    device = torch.device('cuda:%d'%args.gpu_id if torch.cuda.is_available() else 'cpu')
    torch.manual_seed(args.seed)
    model = RDN(scale_factor=args.scale,
                num_channels=3,
                num_features=args.num_features,
                growth_rate=args.growth_rate,
                num_blocks=args.num_blocks,
                num_layers=args.num_layers).to(device)
    if args.weights_file is not None:
        # Warm-start: state_dict() returns references to the model's live
        # tensors, so the in-place copy_ below actually loads the weights.
        state_dict = model.state_dict()
        for n, p in torch.load(args.weights_file, map_location=lambda storage, loc: storage).items():
            if n in state_dict.keys():
                state_dict[n].copy_(p)
            else:
                raise KeyError(n)
    criterion = nn.L1Loss()
    criterion_vgg = VGGLoss(args.gpu_id)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    train_dataset = TrainDataset(args.train_file, patch_size=args.patch_size, scale=args.scale, aug=args.augment, colorization=args.colorization, completion=args.completion)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    #eval_dataset = EvalDataset(args.eval_file, scale=args.scale)
    #eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)
    # NOTE(review): best_* are never updated because evaluation is
    # commented out above.
    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0
    for epoch in range(args.num_epochs):
        # Step-decay learning rate: scaled by lr_decay every lr_decay_epoch epochs.
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * (args.lr_decay ** (epoch // args.lr_decay_epoch))
        model.train()
        epoch_losses = AverageMeter()
        with tqdm(total=(len(train_dataset) - len(train_dataset) % args.batch_size), ncols=80) as t:
            t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))
            for data in train_dataloader:
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)
                preds = model(inputs)
                #import ipdb; ipdb.set_trace()
                # L1 pixel loss plus weighted VGG perceptual loss.
                loss = criterion(preds, labels) + criterion_vgg(preds, labels) * args.vgg_lambda
                epoch_losses.update(loss.item(), len(inputs))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                t.update(len(inputs))
        if (epoch + 1) % args.num_save == 0:
            torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))
| 4,950 | 41.316239 | 173 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/train.py | import argparse
import os
import random
import shutil
import time
import warnings
import sys
import cv2
import numpy as np
import scipy.misc
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from datasets import get_dataset
from models import get_classification_model
from sr_models.model import RDN, Vgg19
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# --- Dataset / classifier options ---
parser.add_argument('--data-root-pos', type=str, default='./data',
                    help='path to dataset')
parser.add_argument('--data-root-neg', type=str, default='./data',
                    help='path to dataset')
parser.add_argument('--dataset', type=str, default='cityscapes',
                    help='dataset name (default: pascal12)')
parser.add_argument('-a', '--arch', type=str, default='resnet50',
                    help='model architecture')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--input-channel', default=3, type=int,
                    help='number of input channel')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
# --- Distributed-training options ---
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--save-every-epoch', type=int, default=2,
                    help='how many epochs to save a model.')
parser.add_argument('--output-path', default='./output_models', type=str, metavar='PATH',
                    help='path to output models')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--dataset_type', type=str, default='image',
                    help='which dataset to load.')
parser.add_argument('--carlibration', default=1.0, type=float,
                    help='carlibration factor for posterior')
parser.add_argument('--defense', default=1.0, type=float,
                    help='defense factor')
parser.add_argument('--save_path', type=str, default='./score.npy', help='save models')
parser.add_argument('--no_dilation', action='store_true', help='do not use dilated convolutions in attackers')
# --- Super-resolution (RDN) options ---
parser.add_argument('--sr-num-features', type=int, default=64)
parser.add_argument('--sr-growth-rate', type=int, default=64)
parser.add_argument('--sr-num-blocks', type=int, default=16)
parser.add_argument('--sr-num-layers', type=int, default=8)
parser.add_argument('--sr-scale', type=int, default=4)
parser.add_argument('--sr-weights-file', type=str, required=True)
parser.add_argument('--idx-stages', type=int, default=0)
# --- Joint SR fine-tuning options ---
parser.add_argument('--lr-sr', default=0.02, type=float, help=' learning rate for resolution')
parser.add_argument('--lw-sr', default=1, type=float, help='loss weight for reconstruction')
parser.add_argument('--mode-sr', default='none', type=str, help='can be none, colorization or denoising.')
parser.add_argument('--fixed-sr', dest='fixed_sr', action='store_true',
                    help='use fixed super resolution model')
def main():
    """Parse CLI args and dispatch to main_worker, either directly or via
    one spawned process per GPU in multiprocessing-distributed mode."""
    args = parser.parse_args()
    if args.seed is not None:
        # Seeding enables cuDNN determinism at a (possibly large) speed cost.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        # World size supplied by the launcher environment.
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for training.

    Builds the binary classifier and the (optionally fine-tuned)
    super-resolution model, then trains for ``args.epochs`` epochs with a
    cosine learning-rate schedule, checkpointing both models periodically.
    """
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
    else:
        print("=> creating model '{}'".format(args.arch))
    # Two-class (real vs. fake) classifier over reconstruction residuals.
    model = get_classification_model(arch=args.arch, pretrained = args.pretrained,
                                     input_channel=args.input_channel, num_classes=2, dilated=(not args.no_dilation))
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    criterion2 = nn.L1Loss().cuda(args.gpu)
    # NOTE(review): this optimizer is dead code -- it is unconditionally
    # replaced below once the SR model has been built.
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:%d'%(args.gpu))
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'],strict=False)
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code (random flipping enabled for training).
    test_dataset = get_dataset(name=args.dataset_type, root_pos=args.data_root_pos, root_neg=args.data_root_neg, flip=True)
    if args.distributed:
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    else:
        test_sampler = None
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=test_sampler)
    # Super-resolution network used to form reconstruction residuals.
    sr_model = RDN(scale_factor=args.sr_scale,
                   num_channels=3,
                   num_features=args.sr_num_features,
                   growth_rate=args.sr_growth_rate,
                   num_blocks=args.sr_num_blocks,
                   num_layers=args.sr_num_layers,
                   requires_grad=False).cuda(args.gpu)#.to(device)
    checkpoint = torch.load(args.sr_weights_file, map_location='cuda:%d'%(args.gpu))
    if 'state_dict' in checkpoint.keys():
        sr_model.load_state_dict(checkpoint['state_dict'])
    else:
        sr_model.load_state_dict(checkpoint)
    perception_net = Vgg19().cuda(args.gpu)
    # With --fixed-sr only the classifier is optimized; otherwise the SR
    # model is fine-tuned jointly.
    if args.fixed_sr:
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.SGD(list(model.parameters()) + list(sr_model.parameters()), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    if not os.path.exists(args.output_path):
        os.mkdir(args.output_path)
    # Cosine schedule of lr multipliers: 1.0 at epoch 0 down to 0.0001 at the
    # final epoch.
    lr_epoch = 0.9999 * np.cos(np.pi / 2 * np.arange(args.epochs) / (args.epochs-1) ) + 0.0001
    for epoch in range( args.epochs ):
        # train for one epoch
        Precision, Recall, Score = train(epoch, test_loader, model, sr_model, perception_net, optimizer, criterion, criterion2, args)
        adjust_learning_rate(optimizer, lr_epoch[epoch] * args.lr )
        if epoch % args.save_every_epoch == 0 or epoch == args.epochs-1:
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'optimizer' : optimizer.state_dict(),
            }, is_best=False, filename='%s/%04d.pth.tar'%(args.output_path , epoch+1))
            save_checkpoint({
                'state_dict': sr_model.state_dict(),
            }, is_best=False, filename='%s/%04d_sr.pth.tar'%(args.output_path , epoch+1))
def train(epoch, test_loader, model, sr_model, perception_net, optimizer, criterion, criterion2, args):
    """Train the detector (and optionally the SR model) for one epoch.

    Each batch is downsampled by average pooling, re-upsampled with
    ``sr_model``, and the reconstruction residual (pixel-space or VGG
    stage ``args.idx_stages``) is classified by ``model``. When
    ``args.fixed_sr`` is False and the batch has more than one positive
    sample, an L1 pixel + multi-stage perceptual reconstruction loss is
    added so the SR model keeps reconstructing real images well.

    Returns:
        (precision, recall, score) -- score is always an empty np.ndarray
        here (nothing is appended during training); kept for interface
        parity with ``test``.
    """
    TP = 0       # true positives  (pred 1, target 1)
    FP = 0       # false positives (pred 1, target 0)
    FN = 0       # false negatives (pred 0, target 1)
    TOTAL = 0    # samples processed
    CORRECT = 0  # correctly classified samples
    # switch to train mode
    model.train()
    sr_model.train()
    score = []
    for i, (input, target, post_path) in enumerate(test_loader):
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # Low-resolution counterpart: mean over each scale x scale cell.
        lr = 0
        for ii in range(args.sr_scale):
            for jj in range(args.sr_scale):
                lr = lr + input[:, :, ii::args.sr_scale, jj::args.sr_scale] / (args.sr_scale * args.sr_scale)
        lr = lr / 255.0
        input = input / 255.0
        optimizer.zero_grad()
        preds_input = sr_model(lr)
        if args.idx_stages > 0:
            # Residual in VGG feature space of the chosen stage.
            per_rec = perception_net(preds_input)
            per_gt = perception_net(input)
            rec_features = abs( per_rec[args.idx_stages - 1] - per_gt[args.idx_stages - 1] )
            output, aa = model( rec_features )
        else:
            # Pixel-space residual.
            rec_features0 = abs( preds_input - input )
            output, aa = model( rec_features0 )
        loss = criterion(output , target)
        # Optional SR reconstruction loss (only on real samples).
        loss2 = 0
        if sum(target==1) > 1 and args.fixed_sr == False:
            if np.random.uniform(0,1) > 0.5 and args.mode_sr == 'denoising':
                # Randomly perturb the LR input with Gaussian noise.
                lr += torch.empty(lr.shape).normal_(mean=0,std=4.0 / 255).cuda()
            elif np.random.uniform(0,1) > 0.5 and args.mode_sr == 'colorization':
                # Randomly gray out a sparse pixel mask of the LR input.
                dims = lr.shape
                mask = np.random.uniform(0,1,(dims[0],dims[2],dims[3]))
                mask = mask < np.random.uniform(0.1,0.25)
                tmp = lr.mean(dim=1)
                # BUG FIX: the original loop reused `i` here, clobbering the
                # enclosing batch counter used by the progress print below.
                for bi in range( dims[0] ):
                    lr[bi,:,mask[bi]] = tmp[bi,mask[bi]]
            preds_input = sr_model(lr)
            per_rec = perception_net(preds_input)
            per_gt = perception_net(input)
            # L1 loss on real samples in pixel space and at the five VGG
            # stages, with weights growing toward the deeper stages.
            loss_rec_real = criterion2(preds_input[target==1], input[target==1])
            loss_rec_real_feat = criterion2(per_rec[0][target==1], per_gt[0][target==1])
            loss_rec_real_feat2 = criterion2(per_rec[1][target==1], per_gt[1][target==1])
            loss_rec_real_feat3 = criterion2(per_rec[2][target==1], per_gt[2][target==1])
            loss_rec_real_feat4 = criterion2(per_rec[3][target==1], per_gt[3][target==1])
            loss_rec_real_feat5 = criterion2(per_rec[4][target==1], per_gt[4][target==1])
            loss2 = loss_rec_real * args.lw_sr
            loss2 += (1.0 / 32) * (loss_rec_real_feat) * args.lw_sr
            loss2 += (1.0 / 16) * (loss_rec_real_feat2 ) * args.lw_sr
            loss2 += (1.0 / 8) * (loss_rec_real_feat3 ) * args.lw_sr
            loss2 += (1.0 / 4) * (loss_rec_real_feat4 ) * args.lw_sr
            loss2 += 1.0 * (loss_rec_real_feat5 ) * args.lw_sr
            loss_total = loss + loss2
        else:
            loss_total = loss
        loss_total.backward()
        # Scale the classifier's gradients relative to the SR model's.
        for p in model.parameters():
            p.grad.mul_( args.lr_sr )
        optimizer.step()
        pred = (output[:,0] < output[:,1])
        CORRECT += pred.eq(target.view_as(pred)).sum().item()
        TOTAL += output.shape[0]
        TP = TP + sum((target==pred)*(1==pred))
        FP = FP + sum((target!=pred)*(1==pred))
        FN = FN + sum((target!=pred)*(0==pred))
        # Guard against division by zero before the first positive
        # prediction / positive label (the original crashed here).
        precision = 1.0*TP/(TP+FP) if (TP+FP) > 0 else 0.0
        recall = 1.0*TP/(TP+FN) if (TP+FN) > 0 else 0.0
        print('%d (%d/%d) : Precision=%.4f, Recall = %.4f, Accuracy,%.4f, Loss=%.4f, Loss_Rec=%.4f' % (epoch, i, len(test_loader), precision, recall, 1.0*CORRECT/TOTAL, loss, loss2 ))
    precision = 1.0*TP/(TP+FP) if (TP+FP) > 0 else 0.0
    recall = 1.0*TP/(TP+FN) if (TP+FN) > 0 else 0.0
    return precision, recall, np.array(score)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; when `is_best` is set, mirror the
    file to 'model_best.pth.tar' in the working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Computes and stores the current value together with a running
    weighted average of everything seen since the last reset."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Return every counter to its initial state."""
        self.count = 0
        self.sum = 0
        self.avg = 0
        self.val = 0

    def update(self, val, n=1):
        """Accumulate `val` (weighted by `n`) into the running statistics."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, lr):
    """Set the learning rate of every parameter group to `lr`.

    Note: the original docstring (copied from the reference ImageNet
    script) claimed a step decay of 10x every 30 epochs, but no schedule
    is computed here -- the caller supplies the already-scheduled value
    (a cosine schedule in main_worker) and this helper just writes it
    through to the optimizer.
    """
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
# Script entry point.
if __name__ == '__main__':
    main()
| 17,834 | 40.866197 | 195 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/customize.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Custermized NN Module"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
import logging
# Short "major.minor" prefix of the installed torch version string.
torch_ver = torch.__version__[:3]

# Public API of this module.
__all__ = ['FrozenBatchNorm2d', 'GlobalAvgPool2d', 'GramMatrix',
           'View', 'Sum', 'Mean', 'Normalize', 'ConcurrentModule',
           'PyramidPooling']
class FrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.
    It contains non-trainable buffers called
    "weight" and "bias", "running_mean", "running_var",
    initialized to perform identity transformation.
    The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
    which are computed from the original four parameters of BN.
    The affine transform `x * weight + bias` will perform the equivalent
    computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
    When loading a backbone model from Caffe2, "running_mean" and "running_var"
    will be left unchanged as identity transformation.
    Other pre-trained backbone models may contain all 4 parameters.
    The forward is implemented by `F.batch_norm(..., training=False)`.
    """

    # Checkpoint format version; consumed by _load_from_state_dict below.
    _version = 3

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        # Stored minus eps so that forward's (running_var + eps) is exactly 1,
        # keeping the freshly-initialized module an identity transform.
        self.register_buffer("running_var", torch.ones(num_features) - eps)

    def forward(self, x):
        if x.requires_grad:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            bias = self.bias - self.running_mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provide more optimization opportunities.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps,
            )

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Upgrade checkpoints produced by older versions of this class
        # before delegating to the default loader.
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            # No running_mean/var in early versions
            # This will silent the warnings
            if prefix + "running_mean" not in state_dict:
                state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
            if prefix + "running_var" not in state_dict:
                state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)

        if version is not None and version < 3:
            logger = logging.getLogger(__name__)
            logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
            # In version < 3, running_var are used without +eps.
            state_dict[prefix + "running_var"] -= self.eps

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def __repr__(self):
        return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)

    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """
        Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
        Args:
            module (torch.nn.Module):
        Returns:
            If module is BatchNorm/SyncBatchNorm, returns a new module.
            Otherwise, in-place convert module and return it.
        Similar to convert_sync_batchnorm in
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            # Leaf case: rebuild this layer as a frozen copy.
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            # Recurse into children, replacing any converted sub-modules.
            for name, child in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if new_child is not child:
                    res.add_module(name, new_child)
        return res
def get_norm(norm, out_channels):
    """
    Map a normalization spec to a concrete layer.

    Args:
        norm (str or callable): one of "BN", "SyncBN", "FrozenBN", "GN",
            "nnSyncBN", "naiveSyncBN", the empty string (returns None), or
            a callable that takes a channel number and returns
            the normalization layer as a nn.Module.
        out_channels (int): number of channels the layer will normalize.

    Returns:
        nn.Module or None: the normalization layer

    Raises:
        KeyError: if ``norm`` is an unrecognized string.
    """
    if isinstance(norm, str):
        if len(norm) == 0:
            return None
        # BUG FIX: the original mapped through names copied from detectron2
        # (BatchNorm2d, NaiveSyncBatchNorm, env) that are not defined in this
        # file, so every string spec raised NameError; resolved here against
        # the torch.nn equivalents.
        if norm == "BN":
            norm = nn.BatchNorm2d
        elif norm in ("SyncBN", "nnSyncBN", "naiveSyncBN"):
            # NaiveSyncBatchNorm was only a workaround for a bug fixed in
            # https://github.com/pytorch/pytorch/pull/36382; nn.SyncBatchNorm
            # is the supported equivalent.
            norm = nn.SyncBatchNorm
        elif norm == "FrozenBN":
            norm = FrozenBatchNorm2d
        elif norm == "GN":
            norm = lambda channels: nn.GroupNorm(32, channels)
        else:
            raise KeyError(norm)
    return norm(out_channels)
class GlobalAvgPool2d(nn.Module):
    """Collapse each feature map to its spatial mean, producing (N, C)."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        pooled = F.adaptive_avg_pool2d(inputs, 1)
        return pooled.view(inputs.size(0), -1)
class GramMatrix(nn.Module):
    r""" Gram Matrix for a 4D convolutional featuremaps as a mini-batch

    .. math::
        \mathcal{G} = \sum_{h=1}^{H_i}\sum_{w=1}^{W_i} \mathcal{F}_{h,w}\mathcal{F}_{h,w}^T
    """
    def forward(self, y):
        batch, channels, height, width = y.size()
        # Flatten the spatial dimensions so each row is one channel.
        flat = y.view(batch, channels, width * height)
        # Channel-by-channel inner products, normalized by element count.
        gram = torch.bmm(flat, flat.transpose(1, 2))
        return gram / (channels * height * width)
class View(nn.Module):
    """Reshape the input to a fixed size, given either as a ``torch.Size``
    or as individual dimension arguments. In-place operator; supports
    SelfParallel mode."""

    def __init__(self, *args):
        super(View, self).__init__()
        got_size = len(args) == 1 and isinstance(args[0], torch.Size)
        self.size = args[0] if got_size else torch.Size(args)

    def forward(self, input):
        return input.view(self.size)
class Sum(nn.Module):
    """Module wrapper that sums its input along a fixed dimension."""

    def __init__(self, dim, keep_dim=False):
        super(Sum, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return torch.sum(input, self.dim, self.keep_dim)
class Mean(nn.Module):
    """Module wrapper that averages its input along a fixed dimension."""

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return torch.mean(input, self.dim, self.keep_dim)
class Normalize(nn.Module):
    r"""Apply :math:`L_p` normalization along a chosen dimension.

    Each subtensor ``v`` along ``dim`` is flattened and replaced by
    :math:`v / \max(\lVert v \rVert_p, \epsilon)` with a fixed epsilon of
    ``1e-8``.  With the defaults this is Euclidean normalization over the
    second dimension.

    Args:
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
    """

    def __init__(self, p=2, dim=1):
        super(Normalize, self).__init__()
        self.p = p
        self.dim = dim

    def forward(self, x):
        # eps guards against division by zero on all-zero subtensors
        return F.normalize(x, p=self.p, dim=self.dim, eps=1e-8)
class ConcurrentModule(nn.ModuleList):
    r"""Apply every contained module to the same input and concatenate the
    outputs along the channel dimension (dim 1).

    Args:
        modules (iterable, optional): an iterable of modules to register
    """

    def __init__(self, modules=None):
        super(ConcurrentModule, self).__init__(modules)

    def forward(self, x):
        return torch.cat([layer(x) for layer in self], 1)
class PyramidPooling(nn.Module):
    """Pyramid pooling module (PSPNet).

    Pools the input at four scales (1, 2, 3, 6), projects each pooled map to
    ``in_channels // 4`` channels with a 1x1 conv + norm + ReLU, upsamples
    back to the input resolution and concatenates everything with the input
    (output has ``2 * in_channels`` channels).

    Reference:
        Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
    """

    def __init__(self, in_channels, norm_layer, up_kwargs):
        super(PyramidPooling, self).__init__()
        self.pool1 = nn.AdaptiveAvgPool2d(1)
        self.pool2 = nn.AdaptiveAvgPool2d(2)
        self.pool3 = nn.AdaptiveAvgPool2d(3)
        self.pool4 = nn.AdaptiveAvgPool2d(6)
        out_channels = int(in_channels / 4)

        def branch():
            # Projection head shared in structure by all four scales.
            return nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                 norm_layer(out_channels),
                                 nn.ReLU(True))

        self.conv1 = branch()
        self.conv2 = branch()
        self.conv3 = branch()
        self.conv4 = branch()
        # bilinear interpolate options forwarded to F.interpolate
        self._up_kwargs = up_kwargs

    def forward(self, x):
        _, _, h, w = x.size()
        feats = [x]
        for pool, conv in ((self.pool1, self.conv1), (self.pool2, self.conv2),
                           (self.pool3, self.conv3), (self.pool4, self.conv4)):
            feats.append(F.interpolate(conv(pool(x)), (h, w), **self._up_kwargs))
        return torch.cat(feats, 1)
| 10,973 | 36.71134 | 99 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/resnet.py | """Dilated ResNet"""
import math
import torch
import torch.utils.model_zoo as model_zoo
import torch.nn as nn
from .customize import GlobalAvgPool2d
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'BasicBlock', 'Bottleneck', 'get_resnet']
# Official torchvision checkpoint URLs.  The deeper variants are included so
# that resnet50/resnet101/resnet152 with ``pretrained=True`` no longer fail
# with a KeyError on the URL lookup.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus a skip connection."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1,
                 norm_layer=None):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=previous_dilation, dilation=previous_dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut around the two-conv main path.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4) with skip."""
    # pylint: disable=unused-argument
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1,
                 downsample=None, previous_dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride,
            padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(
            planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def _sum_each(self, x, y):
        """Element-wise add two equal-length lists of tensors."""
        assert len(x) == len(y)
        return [a + b for a, b in zip(x, y)]

    def forward(self, x):
        # Identity (or projected) shortcut around the three-conv main path.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Dilated pre-trained ResNet which produces stride-8 feature maps at conv5.

    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlock, Bottleneck.
    layers : list of int
        Numbers of blocks in each of the four stages.
    input_channel : int, default 3
        Number of channels of the input image.
    num_classes : int, default 1000
        Number of classification classes.
    dilated : bool, default True
        Apply the dilation strategy (stride 1 with dilation 2/4 in the last
        two stages), typically used in semantic segmentation.
    multi_grid : bool, default False
        Use multi-grid dilations (4, 8, 16) inside the last stage.
    deep_base : bool, default True
        Replace the single 7x7 stem conv with three stacked 3x3 convs.
    norm_layer : object
        Normalization layer used in the backbone network.

    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
        - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
    """
    # pylint: disable=unused-variable
    def __init__(self, block, layers, input_channel=3, num_classes=1000, dilated=True, multi_grid=False,
                 deep_base=True, norm_layer=nn.BatchNorm2d):
        self.inplanes = 128 if deep_base else 64
        super(ResNet, self).__init__()
        if deep_base:
            # Three stacked 3x3 convs (stride 2, 2, 1) instead of one 7x7.
            self.conv1 = nn.Sequential(
                nn.Conv2d(input_channel, 64, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(64),
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3,
                                   bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # NOTE(review): stride=2 in layer1 differs from the standard
        # torchvision layout (stride 1 there) -- confirm the extra
        # downsampling is intended.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2, norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        if dilated:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           dilation=2, norm_layer=norm_layer)
            if multi_grid:
                self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                               dilation=4, norm_layer=norm_layer,
                                               multi_grid=True)
            else:
                self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                               dilation=4, norm_layer=norm_layer)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                           norm_layer=norm_layer)
        self.avgpool = GlobalAvgPool2d()
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, norm_layer):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None, multi_grid=False):
        """Build one ResNet stage of ``blocks`` residual blocks."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match the residual branch's shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                norm_layer(planes * block.expansion),
            )
        layers = []
        multi_dilations = [4, 8, 16]
        # NOTE(review): with multi_grid=True, blocks > 3 would index past
        # multi_dilations below -- confirm multi-grid stages have <= 3 blocks.
        if multi_grid:
            layers.append(block(self.inplanes, planes, stride, dilation=multi_dilations[0],
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, stride, dilation=1,
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, dilation=2,
                                downsample=downsample, previous_dilation=dilation, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            if multi_grid:
                layers.append(block(self.inplanes, planes, dilation=multi_dilations[i],
                                    previous_dilation=dilation, norm_layer=norm_layer))
            else:
                layers.append(block(self.inplanes, planes, dilation=dilation, previous_dilation=dilation,
                                    norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return ``(logits, conv5_feature_map)``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x2 = self.layer4(x)
        x = self.avgpool(x2)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x, x2
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18.

    Args:
        pretrained (bool): If True, load ImageNet weights via model_zoo.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34.

    Args:
        pretrained (bool): If True, load ImageNet weights via model_zoo.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return net
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50.

    Args:
        pretrained (bool): If True, load ImageNet weights via model_zoo.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101.

    Args:
        pretrained (bool): If True, load ImageNet weights via model_zoo.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152.

    Args:
        pretrained (bool): If True, load ImageNet weights via model_zoo.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
def get_resnet(arch, pretrained, **kwargs):
    """Look up and build a ResNet variant by name.

    Args:
        arch (str): one of 'resnet18', 'resnet34', 'resnet50', 'resnet101',
            'resnet152'.
        pretrained (bool): forwarded to the constructor.
        **kwargs: forwarded to the constructor.

    Returns:
        nn.Module: the constructed backbone.

    Raises:
        ValueError: if ``arch`` is unknown (the original fell through and
            raised an UnboundLocalError on ``model``).
    """
    if arch == "resnet18":
        model = resnet18(pretrained, **kwargs)
    elif arch == "resnet34":
        model = resnet34(pretrained, **kwargs)
    elif arch == "resnet50":
        model = resnet50(pretrained, **kwargs)
    elif arch == "resnet101":
        model = resnet101(pretrained, **kwargs)
    elif arch == "resnet152":
        model = resnet152(pretrained, **kwargs)
    else:
        raise ValueError("unknown architecture: {}".format(arch))
    return model
| 11,165 | 35.135922 | 162 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/resnet_cifar.py | """Dilated ResNet"""
import torch.nn as nn
from .customize import FrozenBatchNorm2d
# Torchvision checkpoint URLs.  'resnet50' is included because
# get_cifar_resnet looks it up when pretrained=True; it was missing before,
# which raised a KeyError.  NOTE(review): these ImageNet checkpoints will
# generally not match the custom CIFAR architectures in this module --
# confirm before relying on pretrained=True.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
def conv_1_3x3(input_channel):
    """Stem block: 3x3 stride-1 conv to 64 channels + frozen BN + ReLU."""
    return nn.Sequential(
        nn.Conv2d(input_channel, 64, kernel_size=3, stride=1, padding=1,
                  bias=False),
        FrozenBatchNorm2d(64),
        nn.ReLU(inplace=True))
class bottleneck(nn.Module):
    """Downsampling bottleneck: 1x1 -> kxk (strided) -> 1x1 main path with a
    strided 1x1 projection shortcut; all norms are FrozenBatchNorm2d."""

    def __init__(self, inplanes, planes, kernel_size, strides=(2, 2)):
        super(bottleneck, self).__init__()
        plane1, plane2, plane3 = planes
        self.outchannels = plane3
        pad = int((kernel_size - 1) / 2)
        self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn1 = FrozenBatchNorm2d(plane1)
        self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size,
                               stride=strides, padding=pad, bias=False)
        self.bn2 = FrozenBatchNorm2d(plane2)
        self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn3 = FrozenBatchNorm2d(plane3)
        self.conv4 = nn.Conv2d(inplanes, plane3, kernel_size=1, stride=strides,
                               padding=0, bias=False)
        self.bn4 = FrozenBatchNorm2d(plane3)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor):
        main = self.relu(self.bn1(self.conv1(input_tensor)))
        main = self.relu(self.bn2(self.conv2(main)))
        main = self.bn3(self.conv3(main))
        shortcut = self.bn4(self.conv4(input_tensor))
        main += shortcut
        return self.relu(main)
class basic_block(nn.Module):
    """Two kxk convs (first strided) with a strided 1x1 projection shortcut;
    all norms are FrozenBatchNorm2d."""

    def __init__(self, inplanes, outplanes, kernel_size, strides=(2, 2)):
        super(basic_block, self).__init__()
        pad = int((kernel_size - 1) / 2)
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=kernel_size,
                               stride=strides, padding=pad, bias=False)
        self.bn1 = FrozenBatchNorm2d(outplanes)
        self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=kernel_size,
                               stride=1, padding=pad, bias=False)
        self.bn2 = FrozenBatchNorm2d(outplanes)
        self.conv3 = nn.Conv2d(inplanes, outplanes, kernel_size=1,
                               stride=strides, padding=0, bias=False)
        self.bn3 = FrozenBatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor):
        main = self.relu(self.bn1(self.conv1(input_tensor)))
        main = self.bn2(self.conv2(main))
        shortcut = self.bn3(self.conv3(input_tensor))
        main += shortcut
        return self.relu(main)
class identity_block3(nn.Module):
    """Three-conv (1x1 -> kxk -> 1x1) residual block with identity shortcut;
    requires ``planes[2] == inplanes`` so the skip addition is shape-valid."""

    def __init__(self, inplanes, planes, kernel_size):
        super(identity_block3, self).__init__()
        plane1, plane2, plane3 = planes
        self.outchannels = plane3
        pad = int((kernel_size - 1) / 2)
        self.conv1 = nn.Conv2d(inplanes, plane1, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn1 = FrozenBatchNorm2d(plane1)
        self.conv2 = nn.Conv2d(plane1, plane2, kernel_size=kernel_size,
                               stride=1, padding=pad, bias=False)
        self.bn2 = FrozenBatchNorm2d(plane2)
        self.conv3 = nn.Conv2d(plane2, plane3, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn3 = FrozenBatchNorm2d(plane3)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor, return_conv3_out=False):
        out = self.relu(self.bn1(self.conv1(input_tensor)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += input_tensor
        return self.relu(out)
class identity_block2(nn.Module):
    """Two-conv residual block with identity shortcut; input and output
    channel counts must match for the skip addition."""

    def __init__(self, inplanes, outplanes, kernel_size):
        super(identity_block2, self).__init__()
        pad = int((kernel_size - 1) / 2)
        self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=kernel_size,
                               stride=1, padding=pad, bias=False)
        self.bn1 = FrozenBatchNorm2d(outplanes)
        self.conv2 = nn.Conv2d(outplanes, outplanes, kernel_size=kernel_size,
                               stride=1, padding=pad, bias=False)
        self.bn2 = FrozenBatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_tensor, return_conv3_out=False):
        out = self.relu(self.bn1(self.conv1(input_tensor)))
        out = self.bn2(self.conv2(out))
        out += input_tensor
        return self.relu(out)
class Resnet50(nn.Module):
    """CIFAR-sized ResNet-50 variant built from FrozenBatchNorm2d blocks.

    Stage layout mirrors ResNet-50 (3/4/6/3 blocks per stage) with a 3x3
    stride-1 stem; spatial downsampling happens only in stages 2 and 4.
    """
    def __init__(self, input_channel, num_classes, include_top=True):
        # include_top: when False, forward returns the pooled (N, C, 1, 1)
        # map instead of flattened class logits.
        print('CIFAR Resnet50 is used')
        super(Resnet50, self).__init__()
        self.num_classes = num_classes
        self.input_channel = input_channel
        self.include_top = include_top
        # Channel multiplier applied to every stage width.
        block_ex = 4
        # Define the building blocks
        self.conv_3x3 = conv_1_3x3( self.input_channel )
        self.bottleneck_1 = bottleneck(16 * block_ex, [16 * block_ex, 16 * block_ex, 64 * block_ex], kernel_size=3, strides=(1, 1))
        self.identity_block_1_1 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)
        self.identity_block_1_2 = identity_block3(64*block_ex, [16*block_ex, 16*block_ex, 64*block_ex], kernel_size=3)
        self.bottleneck_2 = bottleneck(64*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3, strides=(2, 2))
        self.identity_block_2_1 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
        self.identity_block_2_2 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
        self.identity_block_2_3 = identity_block3(128*block_ex, [32*block_ex, 32*block_ex, 128*block_ex], kernel_size=3)
        self.bottleneck_3 = bottleneck(128*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3, strides=(1, 1))
        self.identity_block_3_1 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_2 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_3 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_4 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.identity_block_3_5 = identity_block3(256*block_ex, [64*block_ex, 64*block_ex, 256*block_ex], kernel_size=3)
        self.bottleneck_4 = bottleneck(256*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3, strides=(2, 2))
        self.identity_block_4_1 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)
        self.identity_block_4_2 = identity_block3(512*block_ex, [128*block_ex, 128*block_ex, 512*block_ex], kernel_size=3)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(512*block_ex, num_classes)
        # Initialize the weights: Kaiming-normal convs, unit/zero norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, FrozenBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, input_x):
        # NOTE(review): ret1..ret5 capture stage outputs but are never used
        # or returned -- confirm whether intermediate features were meant to
        # be exposed.
        x = self.conv_3x3(input_x)
        ret1 = x
        x = self.bottleneck_1(x)
        x = self.identity_block_1_1(x)
        x = self.identity_block_1_2(x)
        ret2 = x
        x = self.bottleneck_2(x)
        x = self.identity_block_2_1(x)
        x = self.identity_block_2_2(x)
        x = self.identity_block_2_3(x)
        ret3 = x
        x = self.bottleneck_3(x)
        x = self.identity_block_3_1(x)
        x = self.identity_block_3_2(x)
        x = self.identity_block_3_3(x)
        x = self.identity_block_3_4(x)
        x = self.identity_block_3_5(x)
        ret4 = x
        x = self.bottleneck_4(x)
        x = self.identity_block_4_1(x)
        x = self.identity_block_4_2(x)
        ret5 = x
        x = self.avgpool(x)
        if self.include_top:
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
        return x
class Resnet18(nn.Module):
    """CIFAR-sized ResNet-18 variant built from FrozenBatchNorm2d blocks.

    Reported top-1 accuracy by training-set fraction of CIFAR10 (from the
    original author):
    100%: 95.08% | 80%: 94.07% | 60%: 93.08% | 40%: 91.52% | 20%: 86.49%
    10%: 77.84% | 5%: 62.15% | 1%: 38.8% | 0.5%: 17.46%
    """
    def __init__(self, input_channel, num_classes):
        print('CIFAR Resnet18 is used')
        super(Resnet18, self).__init__()
        self.num_classes = num_classes
        self.input_channel = input_channel
        # Define the building blocks
        self.conv_3x3 = conv_1_3x3( self.input_channel )
        self.identity_block_1_0 = identity_block2(64, 64, kernel_size=3)
        self.identity_block_1_1 = identity_block2(64, 64, kernel_size=3)
        self.basic_block_2 = basic_block(64, 128, kernel_size=3, strides=(2, 2))
        self.identity_block_2_1 = identity_block2(128, 128, kernel_size=3)
        self.basic_block_3 = basic_block(128, 256, kernel_size=3, strides=(1, 1))
        self.identity_block_3_1 = identity_block2(256, 256, kernel_size=3)
        self.basic_block_4 = basic_block(256, 512, kernel_size=3, strides=(2, 2))
        self.identity_block_4_1 = identity_block2(512, 512, kernel_size=3)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(512, num_classes)
        # Initialize the weights: Kaiming-normal convs, unit/zero norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, FrozenBatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, input_x):
        # NOTE(review): ret1..ret5 capture stage outputs but are unused --
        # kept for parity with Resnet50; confirm whether intermediate
        # features should be returned.
        x = self.conv_3x3(input_x)
        ret1 = x
        x = self.identity_block_1_0(x)
        x = self.identity_block_1_1(x)
        ret2 = x
        x = self.basic_block_2(x)
        x = self.identity_block_2_1(x)
        ret3 = x
        x = self.basic_block_3(x)
        x = self.identity_block_3_1(x)
        ret4 = x
        x = self.basic_block_4(x)
        x = self.identity_block_4_1(x)
        ret5 = x
        # Removed a leftover debug print of x.shape that spammed stdout on
        # every forward pass.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
def get_cifar_resnet(arch, pretrained, **kwargs):
    """Build a CIFAR-style ResNet by name.

    Args:
        arch (str): 'resnet18' or 'resnet50'.
        pretrained (bool): if True, download and load the torchvision
            checkpoint for ``arch``.  NOTE(review): those ImageNet weights
            are unlikely to match these custom CIFAR architectures --
            confirm before use.
        **kwargs: forwarded to the model constructor.

    Raises:
        ValueError: for an unknown ``arch`` (the original fell through and
            raised an UnboundLocalError on ``model``).
    """
    if arch == "resnet18":
        model = Resnet18(**kwargs)
    elif arch == "resnet50":
        model = Resnet50(**kwargs)
    else:
        raise ValueError("unknown architecture: {}".format(arch))
    if pretrained:
        # model_zoo was never imported at module top level, so the original
        # raised NameError here; import locally (the non-pretrained path
        # does not need it).
        import torch.utils.model_zoo as model_zoo
        model.load_state_dict(model_zoo.load_url(model_urls[arch]))
    return model
| 11,254 | 37.412969 | 140 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/models/__init__.py | from .resnet import get_resnet
from .resnet_cifar import get_cifar_resnet
def get_classification_model(arch, pretrained, **kwargs):
    """Factory for classification backbones; forwards to get_resnet."""
    model = get_resnet(arch, pretrained, **kwargs)
    return model
def get_cifar_classification_model(arch, pretrained, **kwargs):
    """Factory for CIFAR-sized backbones; forwards to get_cifar_resnet."""
    model = get_cifar_resnet(arch, pretrained, **kwargs)
    return model
| 304 | 32.888889 | 63 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/sr_models/utils.py | import torch
import numpy as np
def convert_rgb_to_y(img, dim_order='hwc'):
    """Extract the luma (Y) channel from an RGB image.

    Uses the BT.601 studio-swing coefficients common in SR benchmarks.

    Args:
        img: array-like RGB image; channels-last when ``dim_order == 'hwc'``,
            otherwise channels-first.
    """
    if dim_order == 'hwc':
        r, g, b = img[..., 0], img[..., 1], img[..., 2]
    else:
        r, g, b = img[0], img[1], img[2]
    return 16. + (64.738 * r + 129.057 * g + 25.064 * b) / 256.
def denormalize(img):
    """Map a [0, 1] image tensor back to [0, 255], clamping out-of-range values."""
    return img.mul(255.0).clamp(0.0, 255.0)
def _convert_rgb_to_ycbcr(img):
    """RGB -> YCbCr for an HWC float array (ITU-R BT.601 studio-swing
    coefficients, matching convert_rgb_to_y for the Y channel)."""
    y = 16. + (64.738 * img[..., 0] + 129.057 * img[..., 1] + 25.064 * img[..., 2]) / 256.
    cb = 128. + (-37.945 * img[..., 0] - 74.494 * img[..., 1] + 112.439 * img[..., 2]) / 256.
    cr = 128. + (112.439 * img[..., 0] - 94.154 * img[..., 1] - 18.285 * img[..., 2]) / 256.
    return np.array([y, cb, cr]).transpose([1, 2, 0])


def preprocess(img, device):
    """Convert an RGB image (PIL image or HWC ndarray) to a normalized
    (1, 1, H, W) luma tensor on ``device``, plus the full YCbCr array.

    Fixes a NameError: the original called ``convert_rgb_to_ycbcr``, which
    is not defined anywhere in this module.

    Returns:
        (x, ycbcr): x is the Y channel scaled to [0, 1] as a float tensor;
        ycbcr is the HWC YCbCr array.  NOTE(review): as in the original
        pattern, ``x`` is a view into ``ycbcr``'s Y channel, so that channel
        of the returned array is also scaled by 1/255.
    """
    img = np.array(img).astype(np.float32)
    ycbcr = _convert_rgb_to_ycbcr(img)
    x = ycbcr[..., 0]
    x /= 255.
    x = torch.from_numpy(x).to(device)
    x = x.unsqueeze(0).unsqueeze(0)
    return x, ycbcr
def calc_psnr(img1, img2, max=255.0):
    """Peak signal-to-noise ratio between two images, in dB.

    ``max`` is the peak signal value (255 for 8-bit images).
    """
    mse = ((img1 - img2) ** 2).mean()
    return 10. * ((max ** 2) / mse).log10()
class AverageMeter(object):
    """Track the most recent value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
| 1,061 | 22.086957 | 97 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/sr_models/model.py | import torch
from torch import nn
class DenseLayer(nn.Module):
    """3x3 conv + ReLU whose output is concatenated onto its input
    (dense connectivity: output has in_channels + out_channels channels)."""

    def __init__(self, in_channels, out_channels):
        super(DenseLayer, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=3 // 2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        new_features = self.relu(self.conv(x))
        return torch.cat([x, new_features], 1)
class RDB(nn.Module):
    """Residual dense block: stacked DenseLayers, 1x1 local feature fusion,
    and a local residual connection back to the block input."""

    def __init__(self, in_channels, growth_rate, num_layers):
        super(RDB, self).__init__()
        dense = [DenseLayer(in_channels + growth_rate * i, growth_rate)
                 for i in range(num_layers)]
        self.layers = nn.Sequential(*dense)
        # local feature fusion: 1x1 conv back down to growth_rate channels
        self.lff = nn.Conv2d(in_channels + growth_rate * num_layers, growth_rate, kernel_size=1)

    def forward(self, x):
        # local residual learning
        return x + self.lff(self.layers(x))
class RDN(nn.Module):
    """Residual Dense Network for super-resolution.

    Pipeline: shallow feature extraction (two 3x3 convs) -> D residual dense
    blocks -> global feature fusion over all block outputs -> pixel-shuffle
    upsampling -> output conv back to ``num_channels``.
    """
    def __init__(self, scale_factor, num_channels, num_features, growth_rate, num_blocks, num_layers, requires_grad=True):
        # scale_factor: upsampling factor, must be in [2, 4]
        # num_features (G0): shallow-feature channel count
        # growth_rate (G): channels produced by each dense layer / RDB
        # num_blocks (D): number of RDBs; num_layers (C): dense layers per RDB
        # requires_grad: when False, freezes every parameter
        super(RDN, self).__init__()
        self.G0 = num_features
        self.G = growth_rate
        self.D = num_blocks
        self.C = num_layers
        # shallow feature extraction
        self.sfe1 = nn.Conv2d(num_channels, num_features, kernel_size=3, padding=3 // 2)
        self.sfe2 = nn.Conv2d(num_features, num_features, kernel_size=3, padding=3 // 2)
        # residual dense blocks.  NOTE(review): the first RDB takes G0 input
        # channels and the rest take G, while gff expects G * D fused
        # channels -- this only lines up when G0 == G; confirm callers.
        self.rdbs = nn.ModuleList([RDB(self.G0, self.G, self.C)])
        for _ in range(self.D - 1):
            self.rdbs.append(RDB(self.G, self.G, self.C))
        # global feature fusion
        self.gff = nn.Sequential(
            nn.Conv2d(self.G * self.D, self.G0, kernel_size=1),
            nn.Conv2d(self.G0, self.G0, kernel_size=3, padding=3 // 2)
        )
        # up-sampling: one conv + PixelShuffle(2) stage per factor of two,
        # or a single PixelShuffle(3) stage for scale 3
        assert 2 <= scale_factor <= 4
        if scale_factor == 2 or scale_factor == 4:
            self.upscale = []
            for _ in range(scale_factor // 2):
                self.upscale.extend([nn.Conv2d(self.G0, self.G0 * (2 ** 2), kernel_size=3, padding=3 // 2),
                                     nn.PixelShuffle(2)])
            self.upscale = nn.Sequential(*self.upscale)
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(self.G0, self.G0 * (scale_factor ** 2), kernel_size=3, padding=3 // 2),
                nn.PixelShuffle(scale_factor)
            )
        self.output = nn.Conv2d(self.G0, num_channels, kernel_size=3, padding=3 // 2)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, x):
        sfe1 = self.sfe1(x)
        sfe2 = self.sfe2(sfe1)
        x = sfe2
        # Collect every RDB output for global fusion.
        local_features = []
        for i in range(self.D):
            x = self.rdbs[i](x)
            local_features.append(x)
        x = self.gff(torch.cat(local_features, 1)) + sfe1  # global residual learning
        x = self.upscale(x)
        x = self.output(x)
        return x
class VGGLoss(nn.Module):
    """Perceptual loss: MSE between VGG19 feature maps of two images,
    summed over five stages with fixed per-stage weights (CUDA only -- the
    feature extractor is moved to the given GPU at construction)."""

    def __init__(self, gpu_ids):
        super(VGGLoss, self).__init__()
        self.vgg = Vgg19().cuda(device=gpu_ids)
        self.criterion = nn.MSELoss()
        # Deeper stages contribute more heavily.
        self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]

    def forward(self, x, y):
        x_feats = self.vgg(x)
        y_feats = self.vgg(y)
        # Target features are detached so the loss only backprops through x.
        total = 0
        for w, xf, yf in zip(self.weights, x_feats, y_feats):
            total += w * self.criterion(xf, yf.detach())
        return total
from torchvision import models
class Vgg19(torch.nn.Module):
    """Frozen VGG19 feature extractor exposing five staged slices of
    torchvision's pretrained ``features`` (layer ranges 0-2, 2-7, 7-12,
    12-21, 21-30)."""

    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        bounds = [0, 2, 7, 12, 21, 30]
        stages = []
        for start, stop in zip(bounds[:-1], bounds[1:]):
            stage = torch.nn.Sequential()
            for idx in range(start, stop):
                stage.add_module(str(idx), vgg_features[idx])
            stages.append(stage)
        self.slice1, self.slice2, self.slice3, self.slice4, self.slice5 = stages
        if not requires_grad:
            # Freeze all VGG weights; this module is a fixed extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h1 = self.slice1(X)
        h2 = self.slice2(h1)
        h3 = self.slice3(h2)
        h4 = self.slice4(h3)
        h5 = self.slice5(h4)
        return [h1, h2, h3, h4, h5]
| 4,943 | 36.172932 | 122 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/sr_models/datasets.py | import random
import h5py
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from scipy.ndimage import gaussian_filter
from scipy.ndimage.filters import convolve
from io import BytesIO
import copy
class TrainDataset(Dataset):
    """SR training set yielding (lr, hr) float32 CHW pairs in [0, 1].

    hr is a random (patch_size * scale)^2 crop of the image; lr is the
    aligned patch of a scale-by-scale box-downsampled copy.  Optional random
    degradations: JPEG re-compression and Gaussian noise (``aug``), pixel
    zeroing (``completion``), and per-pixel decolorization (``colorization``).
    """
    def __init__(self, file_path, patch_size, scale, aug=False, colorization=False, completion=False):
        # file_path: text file listing one image path per line (see ParseFile)
        # patch_size: LR crop size; the HR crop is patch_size * scale
        # scale: integer downsampling factor
        super(TrainDataset, self).__init__()
        self.files = ParseFile(file_path)
        self.patch_size = patch_size
        self.scale = scale
        self.aug = aug
        self.colorization = colorization
        self.completion = completion
    @staticmethod
    def random_crop(lr, hr, size, scale):
        # Aligned random crop: size x size in LR coordinates and the
        # corresponding (size*scale)^2 window in HR coordinates.
        lr_left = random.randint(0, lr.shape[1] - size)
        lr_right = lr_left + size
        lr_top = random.randint(0, lr.shape[0] - size)
        lr_bottom = lr_top + size
        hr_left = lr_left * scale
        hr_right = lr_right * scale
        hr_top = lr_top * scale
        hr_bottom = lr_bottom * scale
        lr = lr[lr_top:lr_bottom, lr_left:lr_right]
        hr = hr[hr_top:hr_bottom, hr_left:hr_right]
        return lr, hr
    @staticmethod
    def random_horizontal_flip(lr, hr):
        # Flip both patches left-right with probability 0.5.
        if random.random() < 0.5:
            lr = lr[:, ::-1, :].copy()
            hr = hr[:, ::-1, :].copy()
        return lr, hr
    @staticmethod
    def random_vertical_flip(lr, hr):
        # Flip both patches top-bottom with probability 0.5.
        if random.random() < 0.5:
            lr = lr[::-1, :, :].copy()
            hr = hr[::-1, :, :].copy()
        return lr, hr
    # im is an numpy float/double array
    @staticmethod
    def add_gaussian_noise(im, std):
        """Add zero-mean Gaussian noise with the given std."""
        noise = np.random.normal(0,std,im.shape)
        im = im + noise
        return im
    # im is read from PIL.Image.open
    @staticmethod
    def jpeg(im, jpeg_quality):
        """Round-trip a PIL image through in-memory JPEG compression."""
        buffer = BytesIO()
        im.save(buffer, 'jpeg', quality = jpeg_quality)
        im = Image.open(buffer)
        return im
    @staticmethod
    def random_rotate_90(lr, hr):
        # Rotate both patches by 90 degrees with probability 0.5.
        if random.random() < 0.5:
            lr = np.rot90(lr, axes=(1, 0)).copy()
            hr = np.rot90(hr, axes=(1, 0)).copy()
        return lr, hr
    def __getitem__(self, idx):
        img = Image.open(self.files[idx])
        img2 = img.copy()
        hr = np.array(img).astype('float')
        # With aug enabled, re-compress a copy as JPEG ~29% of the time
        # (P(uniform > 0.7071)) at a random quality in [25, 75).
        if self.aug and np.random.uniform(0,1) > 0.7071:
            img2 = self.jpeg(img2, int(np.random.choice(np.arange(25, 75))))
        # NOTE(review): hr2 (a 15x15 box blur of the possibly-JPEGed copy;
        # kernel normalized by 225 = 15*15) is computed but never used below
        # -- confirm whether it was meant to feed the LR branch.
        hr2 = np.array(img2).astype('float')
        hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
        # Box (average-pool) downsampling of hr by the scale factor.
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
        lr, hr = self.random_horizontal_flip(lr, hr)
        lr, hr = self.random_vertical_flip(lr, hr)
        lr, hr = self.random_rotate_90(lr, hr)
        if self.aug and np.random.uniform(0,1) > 0.7071:
            lr = self.add_gaussian_noise(lr, np.random.uniform(0,10))
        # HWC [0, 255] -> CHW [0, 1] float32.
        lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        if self.completion and np.random.uniform(0,1) > 0.7071:
            # Zero out a random 5-15% of LR pixels across all channels.
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            lr[0,mask] = 0
            lr[1,mask] = 0
            lr[2,mask] = 0
        if self.colorization and np.random.uniform(0,1) > 0.7071:
            # Replace a random 5-15% of LR pixels by their channel mean (gray).
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            tmp = lr.mean(axis=0)
            for i_dim in range(dims[0]):
                lr[i_dim,mask] = tmp[mask]
        return lr, hr
    def __len__(self):
        return len(self.files)
class TrainDataset256(Dataset):
def __init__(self, file_path, patch_size, scale, aug=False, colorization=False, completion=False):
super(TrainDataset256, self).__init__()
self.files = ParseFile(file_path)
self.patch_size = patch_size
self.scale = scale
self.aug = aug
self.colorization = colorization
self.completion = completion
@staticmethod
def random_crop(lr, hr, size, scale):
lr_left = random.randint(0, lr.shape[1] - size)
lr_right = lr_left + size
lr_top = random.randint(0, lr.shape[0] - size)
lr_bottom = lr_top + size
hr_left = lr_left * scale
hr_right = lr_right * scale
hr_top = lr_top * scale
hr_bottom = lr_bottom * scale
lr = lr[lr_top:lr_bottom, lr_left:lr_right]
hr = hr[hr_top:hr_bottom, hr_left:hr_right]
return lr, hr
@staticmethod
def random_horizontal_flip(lr, hr):
if random.random() < 0.5:
lr = lr[:, ::-1, :].copy()
hr = hr[:, ::-1, :].copy()
return lr, hr
@staticmethod
def random_vertical_flip(lr, hr):
if random.random() < 0.5:
lr = lr[::-1, :, :].copy()
hr = hr[::-1, :, :].copy()
return lr, hr
# im is an numpy float/double array
@staticmethod
def add_gaussian_noise(im, std):
noise = np.random.normal(0,std,im.shape)
im = im + noise
return im
# im is read from PIL.Image.open
@staticmethod
def jpeg(im, jpeg_quality):
buffer = BytesIO()
im.save(buffer, 'jpeg', quality = jpeg_quality)
im = Image.open(buffer)
return im
@staticmethod
def random_rotate_90(lr, hr):
if random.random() < 0.5:
lr = np.rot90(lr, axes=(1, 0)).copy()
hr = np.rot90(hr, axes=(1, 0)).copy()
return lr, hr
    def __getitem__(self, idx):
        """Load image `idx` and return an augmented (lr, hr) training pair,
        both as float32 CHW arrays scaled to [0, 1].

        NOTE(review): the augmentation steps draw from the global RNGs in a
        fixed order; reordering any step changes the produced samples.
        """
        img = Image.open(self.files[idx])
        # every source image is normalized to a fixed 256x256 HR size
        img = img.resize((256 , 256), resample=Image.BICUBIC)
        img2 = img.copy()
        hr = np.array(img).astype('float')
        # optional JPEG-compression augmentation applied to the copy only
        if self.aug and np.random.uniform(0,1) > 0.7071:
            img2 = self.jpeg(img2, int(np.random.choice(np.arange(25, 75))))
            #print('agument jpeg')
        # hr2 = per-channel 15x15 box blur of the (possibly JPEG'd) copy.
        # NOTE(review): hr2 is never used after this point — lr below is
        # built from hr, not hr2. Possibly lr was meant to come from hr2
        # (blur-before-downsample); confirm intent before changing.
        hr2 = np.array(img2).astype('float')
        hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
        # box-filter downsampling: average the scale x scale pixel phases
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        # paired geometric augmentations (crop + flips + rotate)
        lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
        lr, hr = self.random_horizontal_flip(lr, hr)
        lr, hr = self.random_vertical_flip(lr, hr)
        lr, hr = self.random_rotate_90(lr, hr)
        # optional additive Gaussian noise on the LR input only
        if self.aug and np.random.uniform(0,1) > 0.7071:
            lr = self.add_gaussian_noise(lr, np.random.uniform(0,10))
            #print('augment noising')
        # HWC -> CHW and scale pixel values into [0, 1]
        lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
        # optional "completion" task: zero out 5-15% of LR pixels (all channels)
        if self.completion and np.random.uniform(0,1) > 0.7071:
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            lr[0,mask] = 0
            lr[1,mask] = 0
            lr[2,mask] = 0
        # optional "colorization" task: gray out 5-15% of LR pixels
        if self.colorization and np.random.uniform(0,1) > 0.7071:
            dims = lr.shape
            mask = np.random.uniform(0,1,(dims[1],dims[2]))
            mask = mask < np.random.uniform(0.05,0.15)
            tmp = lr.mean(axis=0)
            for i_dim in range(dims[0]):
                lr[i_dim,mask] = tmp[mask]
        return lr, hr
    def __len__(self):
        """Number of image paths in this dataset."""
        return len(self.files)
class EvalDataset(Dataset):
    """Evaluation dataset: pairs each full-size image (HR) with a
    box-filter-downsampled LR version (factor `scale`), no augmentation.
    """
    def __init__(self, file_path, scale):
        super(EvalDataset, self).__init__()
        # file_path: text file listing one image path per line
        self.files = ParseFile(file_path)
        self.scale = scale
    def __getitem__(self, idx):
        hr = np.array(Image.open(self.files[idx])).astype('float')
        # hr2 = per-channel 15x15 box blur of hr.
        # NOTE(review): hr2 is computed but never used below — lr is built
        # from hr. Possibly lr was meant to come from hr2 (anti-aliased
        # downsampling); confirm intent before changing.
        hr2 = hr.copy()
        hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
        # box-filter downsampling: average the scale x scale pixel phases
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        return lr, hr
    def __len__(self):
        """Number of image paths in this dataset."""
        return len(self.files)
def ParseFile(filepath):
    """Read a listing file and return one entry (e.g. an image path) per line.

    BUG FIX: the previous `line[:-1]` unconditionally dropped the last
    character, which corrupted the final entry whenever the file did not end
    with a newline. `rstrip('\n')` strips only a trailing newline.
    """
    output = []
    with open(filepath) as fp:
        for line in fp:
            output.append(line.rstrip('\n'))
    return output
class EvalDataset256(Dataset):
    """Evaluation dataset: every image is resized to 256x256 (HR), then
    paired with a box-filter-downsampled LR version (factor `scale`).
    """
    def __init__(self, file_path, scale):
        super(EvalDataset256, self).__init__()
        # file_path: text file listing one image path per line
        self.files = ParseFile(file_path)
        self.scale = scale
    def __getitem__(self, idx):
        # BUG FIX: resizing must happen on the PIL image before np.array();
        # the previous code called .resize(..., resample=...) on a numpy
        # array, which raises a TypeError at runtime.
        img = Image.open(self.files[idx]).resize((256, 256), resample=Image.BICUBIC)
        hr = np.array(img).astype('float')
        # hr2 = per-channel 15x15 box blur of hr.
        # NOTE(review): hr2 is never used after this point — possibly lr was
        # meant to be built from hr2; kept for parity with EvalDataset.
        hr2 = hr.copy()
        hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
        hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
        # box-filter downsampling: average the scale x scale pixel phases
        lr = 0
        for i in range(self.scale):
            for j in range(self.scale):
                lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
        return lr, hr
    def __len__(self):
        """Number of image paths in this dataset."""
        return len(self.files)
def ParseFile(filepath):
    """Read a listing file and return one entry (e.g. an image path) per line.

    BUG FIX: the previous `line[:-1]` unconditionally dropped the last
    character, which corrupted the final entry whenever the file did not end
    with a newline. `rstrip('\n')` strips only a trailing newline.
    """
    output = []
    with open(filepath) as fp:
        for line in fp:
            output.append(line.rstrip('\n'))
    return output
| 10,057 | 32.415282 | 102 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/datasets/base.py | ###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2017
###########################################################################
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
__all__ = ['BaseDataset']
class BaseDataset(data.Dataset):
    """Common base for two-folder (positive/negative) image datasets.

    Stores the two root directories and whether random flipping is enabled;
    subclasses must implement `__getitem__` and `pred_offset`.
    """
    def __init__(self, root_pos, root_neg, flip=True):
        self.root_pos = root_pos
        self.root_neg = root_neg
        self.flip = flip
    def __getitem__(self, index):
        # BUG FIX: `raise NotImplemented` raises a TypeError because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError
    @property
    def num_class(self):
        # relies on subclasses defining a NUM_CLASS class attribute
        return self.NUM_CLASS
    @property
    def pred_offset(self):
        # BUG FIX: same NotImplemented -> NotImplementedError correction
        raise NotImplementedError
    def make_pred(self, x):
        """Shift a raw prediction by the subclass-defined offset."""
        return x + self.pred_offset
| 835 | 22.222222 | 75 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/datasets/image_dataset.py | ###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2018
###########################################################################
import os
import sys
import random
import numpy as np
from tqdm import tqdm, trange
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
from os import listdir
from os.path import isfile, join
from scipy.interpolate import interp2d
class BinaryImageDataset(BaseDataset):
    """Binary classification dataset over two image folders.

    Files under `root_pos` get label 1 and files under `root_neg` get
    label 0 (see `get_data_pairs`); `__getitem__` yields
    (CHW float tensor, label tensor, path).
    """
    def __init__(self, root_pos=os.path.expanduser('/BS/work/data_pos'), root_neg=os.path.expanduser('/BS/work/data_neg'), flip=True, **kwargs):
        super(BinaryImageDataset, self).__init__(
            root_pos, root_neg, flip, **kwargs)
        # self.files = [all_paths, labels] (a third parallel list is optional)
        self.files = get_data_pairs(self.root_pos, self.root_neg)
        assert (len(self.files[0]) == len(self.files[1]))
        if len(self.files) == 3:
            assert (len(self.files[0]) == len(self.files[2]))
        if len(self.files[0]) == 0:
            # BUG FIX: the previous message referenced self.root, which is
            # never set (BaseDataset stores root_pos/root_neg), so this raise
            # crashed with AttributeError instead of the intended RuntimeError.
            raise RuntimeError("Found 0 images in subfolders of: %s and %s\n"
                               % (self.root_pos, self.root_neg))
        print("Found %d examples" % len(self.files[0]))
    def __getitem__(self, index):
        tmp = Image.open(self.files[0][index][:])
        data = np.array(tmp)
        # HWC -> CHW
        data = data.transpose(2, 0, 1)
        if self.flip:
            # flip_step is +1 (keep) or -1 (mirror the width axis)
            flip_step = np.random.randint(0, 2) * 2 - 1
            data = data[:, :, ::flip_step]
        label = self.files[1][index]
        data = torch.from_numpy(data.copy()).float()
        label = torch.tensor(label).long()
        return data, label, self.files[0][index]
    def __len__(self):
        return len(self.files[0])
def get_data_pairs(pos_folder, neg_folder):
    """Collect the files of both folders into a single labeled listing.

    Returns [paths, labels]: the sorted positive-folder files (label 1)
    followed by the sorted negative-folder files (label 0).
    """
    def list_files(folder):
        # sorted, full paths of the plain files directly inside `folder`
        return sorted(join(folder, name) for name in listdir(folder)
                      if isfile(join(folder, name)))
    pos_data = list_files(pos_folder)
    neg_data = list_files(neg_folder)
    return [pos_data + neg_data, [1] * len(pos_data) + [0] * len(neg_data)]
| 2,300 | 31.871429 | 144 | py |
BeyondtheSpectrum | BeyondtheSpectrum-main/datasets/__init__.py | import warnings
from torchvision.datasets import *
from .base import *
from .image_dataset import BinaryImageDataset
# Registry mapping dataset name (lower-case) -> dataset class,
# consumed by get_dataset below.
datasets = {
    'image': BinaryImageDataset,
}
def get_dataset(name, **kwargs):
    """Look up a dataset class by case-insensitive name and instantiate it,
    forwarding all keyword arguments to its constructor."""
    dataset_cls = datasets[name.lower()]
    return dataset_cls(**kwargs)
| 246 | 16.642857 | 45 | py |
ps-lite | ps-lite-master/tracker/dmlc_local.py | #!/usr/bin/env python
"""
DMLC submission script, local machine version
"""
import argparse
import sys
import os
import subprocess
from threading import Thread
import tracker
import signal
import logging
keepalive = """
nrep=0
rc=254
while [ $rc -eq 254 ];
do
export DMLC_NUM_ATTEMPT=$nrep
%s
rc=$?;
nrep=$((nrep+1));
done
"""
class LocalLauncher(object):
    """Launch all DMLC worker/server processes as threads on this machine."""

    def __init__(self, args, unknown):
        self.args = args
        # full command line: user command plus any unrecognized extra args
        self.cmd = ' '.join(args.command) + ' ' + ' '.join(unknown)

    def exec_cmd(self, cmd, role, pass_env):
        """Run `cmd` with DMLC_ROLE=`role`, retrying while it exits with 254.

        Exit code 254 asks for a restart; 0 returns normally; any other
        code aborts.
        """
        env = os.environ.copy()
        for k, v in pass_env.items():
            env[k] = str(v)
        env['DMLC_ROLE'] = role
        ntrial = 0
        while True:
            if os.name == 'nt':
                env['DMLC_NUM_ATTEMPT'] = str(ntrial)
                ret = subprocess.call(cmd, shell=True, env = env)
                if ret == 254:
                    ntrial += 1
                    continue
            else:
                # on POSIX the restart loop lives in the bash wrapper
                bash = keepalive % (cmd)
                ret = subprocess.call(bash, shell=True, executable='bash', env = env)
            if ret == 0:
                # BUG FIX: message contained a dangling %d with no argument
                logging.debug('Thread exit with 0')
                return
            else:
                if os.name == 'nt':
                    # BUG FIX: os.exit does not exist; use sys.exit instead
                    sys.exit(-1)
                else:
                    raise Exception('Get nonzero return code=%d' % ret)

    def submit(self):
        """Return a closure that starts nworker + nserver daemon threads."""
        def mthread_submit(nworker, nserver, envs):
            """
            customized submit script
            """
            procs = {}
            for i in range(nworker + nserver):
                role = 'worker' if i < nworker else 'server'
                procs[i] = Thread(target = self.exec_cmd, args = (self.cmd, role, envs))
                procs[i].setDaemon(True)
                procs[i].start()
        return mthread_submit

    def run(self):
        """Configure logging and hand the job to the tracker."""
        tracker.config_logger(self.args)
        tracker.submit(self.args.num_workers,
                       self.args.num_servers,
                       fun_submit = self.submit(),
                       pscmd = self.cmd)
def run(self):
tracker.config_logger(self.args)
tracker.submit(self.args.num_workers,
self.args.num_servers,
fun_submit = self.submit(),
pscmd = self.cmd)
def main():
    """Parse command-line options and launch the job as local processes."""
    arg_parser = argparse.ArgumentParser(
        description='DMLC script to submit dmlc jobs as local process')
    arg_parser.add_argument('-n', '--num-workers', required=True, type=int,
                            help = 'number of worker nodes to be launched')
    arg_parser.add_argument('-s', '--num-servers', type=int,
                            help = 'number of server nodes to be launched')
    arg_parser.add_argument('--log-level', default='INFO', type=str,
                            choices=['INFO', 'DEBUG'],
                            help = 'logging level')
    arg_parser.add_argument('--log-file', type=str,
                            help = 'output log to the specific log file')
    arg_parser.add_argument('command', nargs='+',
                            help = 'command for launching the program')
    known_args, extra_args = arg_parser.parse_known_args()
    LocalLauncher(known_args, extra_args).run()
main()
| 3,051 | 29.217822 | 88 | py |
ps-lite | ps-lite-master/tracker/tracker.py | """
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
import sys
import os
import socket
import struct
import subprocess
import time
import logging
import random
from threading import Thread
"""
Extension of socket to handle recv and send of special data
"""
class ExSocket:
    """Thin socket wrapper adding exact-length receive plus native-int and
    length-prefixed string send/recv helpers ('@i' = platform int).

    NOTE(review): written for Python 2 — the received chunks are joined with
    a str ''.join, and a peer that closes early makes recvall spin forever.
    """
    def __init__(self, sock):
        self.sock = sock
    def recvall(self, nbytes):
        # keep reading until exactly nbytes have been accumulated
        chunks = []
        received = 0
        while received < nbytes:
            chunk = self.sock.recv(min(nbytes - received, 1024))
            received += len(chunk)
            chunks.append(chunk)
        return ''.join(chunks)
    def recvint(self):
        return struct.unpack('@i', self.recvall(4))[0]
    def sendint(self, n):
        self.sock.sendall(struct.pack('@i', n))
    def sendstr(self, s):
        # length prefix followed by the raw payload
        self.sendint(len(s))
        self.sock.sendall(s)
    def recvstr(self):
        return self.recvall(self.recvint())
# magic number used to verify existence of data
kMagic = 0xff99
class SlaveEntry:
    """One accepted tracker connection: performs the magic-number handshake
    and implements rank assignment / link setup for that slave.

    NOTE(review): Python 2 code (`xrange`); the send/recv order below is the
    wire protocol and must not be reordered.
    """
    def __init__(self, sock, s_addr):
        # handshake: exchange kMagic, then read rank, world size, job id, cmd
        slave = ExSocket(sock)
        self.sock = slave
        self.host = socket.gethostbyname(s_addr[0])
        magic = slave.recvint()
        assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
        slave.sendint(kMagic)
        self.rank = slave.recvint()
        self.world_size = slave.recvint()
        self.jobid = slave.recvstr()
        self.cmd = slave.recvstr()
    def decide_rank(self, job_map):
        """Return this slave's rank: its own if known, else a job-id lookup,
        else -1 (meaning "assign me one")."""
        if self.rank >= 0:
            return self.rank
        if self.jobid != 'NULL' and self.jobid in job_map:
            return job_map[self.jobid]
        return -1
    def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
        """Send the slave its rank plus tree/ring neighbor info, then loop
        helping it connect to the still-unreachable neighbors.

        Returns the list of ranks removed from `wait_conn` because all of
        their expected incoming connections are now accounted for.
        """
        self.rank = rank
        nnset = set(tree_map[rank])
        rprev, rnext = ring_map[rank]
        self.sock.sendint(rank)
        # send parent rank
        self.sock.sendint(parent_map[rank])
        # send world size
        self.sock.sendint(len(tree_map))
        self.sock.sendint(len(nnset))
        # send the rprev and next link
        for r in nnset:
            self.sock.sendint(r)
        # send prev link
        if rprev != -1 and rprev != rank:
            nnset.add(rprev)
            self.sock.sendint(rprev)
        else:
            self.sock.sendint(-1)
        # send next link
        if rnext != -1 and rnext != rank:
            nnset.add(rnext)
            self.sock.sendint(rnext)
        else:
            self.sock.sendint(-1)
        while True:
            # slave reports which neighbors it already reaches (goodset)
            ngood = self.sock.recvint()
            goodset = set([])
            for i in xrange(ngood):
                goodset.add(self.sock.recvint())
            assert goodset.issubset(nnset)
            badset = nnset - goodset
            # conset: missing neighbors whose listen address we already know
            conset = []
            for r in badset:
                if r in wait_conn:
                    conset.append(r)
            self.sock.sendint(len(conset))
            self.sock.sendint(len(badset) - len(conset))
            for r in conset:
                self.sock.sendstr(wait_conn[r].host)
                self.sock.sendint(wait_conn[r].port)
                self.sock.sendint(r)
            nerr = self.sock.recvint()
            if nerr != 0:
                # connection attempts failed; retry the negotiation round
                continue
            # slave reports the port it listens on for the remaining peers
            self.port = self.sock.recvint()
            rmset = []
            # all connection was successuly setup
            for r in conset:
                wait_conn[r].wait_accept -= 1
                if wait_conn[r].wait_accept == 0:
                    rmset.append(r)
            for r in rmset:
                wait_conn.pop(r, None)
            self.wait_accept = len(badset) - len(conset)
            return rmset
class RabitTracker:
    """
    tracker for rabit

    Accepts slave connections, computes the tree/ring communication topology
    and coordinates rank assignment until all slaves shut down.

    NOTE(review): Python 2 code throughout — `/` below is integer division
    on ints, `range()` returns a list that is mutated with `.pop(0)`, and
    `Thread.isAlive` was removed in Python 3.9.
    """
    def __init__(self, hostIP, nslave, port = 9091, port_end = 9999):
        # bind the first free port in [port, port_end)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        for port in range(port, port_end):
            try:
                sock.bind((hostIP, port))
                self.port = port
                break
            except socket.error:
                continue
        sock.listen(16)
        self.sock = sock
        self.hostIP = hostIP
        logging.info('start listen on %s:%d' % (hostIP, self.port))
    def __del__(self):
        self.sock.close()
    def slave_envs(self):
        """
        get enviroment variables for slaves
        can be passed in as args or envs
        """
        return {'DMLC_TRACKER_URI': self.hostIP,
                'DMLC_TRACKER_PORT': self.port}
    def get_neighbor(self, rank, nslave):
        """Neighbors of `rank` in an implicit 1-based binary heap layout."""
        rank = rank + 1
        ret = []
        if rank > 1:
            # parent (py2 integer division)
            ret.append(rank / 2 - 1)
        if rank * 2 - 1 < nslave:
            ret.append(rank * 2 - 1)
        if rank * 2 < nslave:
            ret.append(rank * 2)
        return ret
    def get_tree(self, nslave):
        """Build tree_map (rank -> neighbor list) and parent_map
        (rank -> parent, -1 for the root)."""
        tree_map = {}
        parent_map = {}
        for r in range(nslave):
            tree_map[r] = self.get_neighbor(r, nslave)
            parent_map[r] = (r + 1) / 2 - 1
        return tree_map, parent_map
    def find_share_ring(self, tree_map, parent_map, r):
        """
        get a ring structure that tends to share nodes with the tree
        return a list starting from r
        """
        nset = set(tree_map[r])
        cset = nset - set([parent_map[r]])
        if len(cset) == 0:
            return [r]
        rlst = [r]
        cnt = 0
        for v in cset:
            vlst = self.find_share_ring(tree_map, parent_map, v)
            cnt += 1
            if cnt == len(cset):
                # reverse the last child's chain so the ring closes up
                vlst.reverse()
            rlst += vlst
        return rlst
    def get_ring(self, tree_map, parent_map):
        """
        get a ring connection used to recover local data
        """
        assert parent_map[0] == -1
        rlst = self.find_share_ring(tree_map, parent_map, 0)
        assert len(rlst) == len(tree_map)
        ring_map = {}
        nslave = len(tree_map)
        for r in range(nslave):
            rprev = (r + nslave - 1) % nslave
            rnext = (r + 1) % nslave
            ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
        return ring_map
    def get_link_map(self, nslave):
        """
        get the link map, this is a bit hacky, call for better algorithm
        to place similar nodes together

        Relabels ranks so that walking the ring yields consecutive numbers,
        then remaps the tree/parent/ring structures accordingly.
        """
        tree_map, parent_map = self.get_tree(nslave)
        ring_map = self.get_ring(tree_map, parent_map)
        rmap = {0 : 0}
        k = 0
        for i in range(nslave - 1):
            k = ring_map[k][1]
            rmap[k] = i + 1
        ring_map_ = {}
        tree_map_ = {}
        parent_map_ ={}
        for k, v in ring_map.items():
            ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
        for k, v in tree_map.items():
            tree_map_[rmap[k]] = [rmap[x] for x in v]
        for k, v in parent_map.items():
            if k != 0:
                parent_map_[rmap[k]] = rmap[v]
            else:
                parent_map_[rmap[k]] = -1
        return tree_map_, parent_map_, ring_map_
    def handle_print(self,slave, msg):
        """Forward a slave's 'print' message to the log and stdout."""
        logging.info(msg.strip())
        sys.stdout.write(msg)
        sys.stdout.flush()
    def accept_slaves(self, nslave):
        """Main accept loop: serve print/shutdown/start/recover requests
        until every slave has sent 'shutdown'."""
        # set of nodes that finishs the job
        shutdown = {}
        # set of nodes that is waiting for connections
        wait_conn = {}
        # maps job id to rank
        job_map = {}
        # list of workers that is pending to be assigned rank
        pending = []
        # lazy initialize tree_map
        tree_map = None
        while len(shutdown) != nslave:
            fd, s_addr = self.sock.accept()
            s = SlaveEntry(fd, s_addr)
            if s.cmd == 'print':
                msg = s.sock.recvstr()
                self.handle_print(s, msg)
                continue
            if s.cmd == 'shutdown':
                assert s.rank >= 0 and s.rank not in shutdown
                assert s.rank not in wait_conn
                shutdown[s.rank] = s
                logging.debug('Recieve %s signal from %d' % (s.cmd, s.rank))
                continue
            assert s.cmd == 'start' or s.cmd == 'recover'
            # lazily initialize the slaves
            if tree_map == None:
                assert s.cmd == 'start'
                if s.world_size > 0:
                    nslave = s.world_size
                tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes that is pending for getting up
                # (py2: range() returns a mutable list, consumed by pop(0))
                todo_nodes = range(nslave)
            else:
                assert s.world_size == -1 or s.world_size == nslave
            if s.cmd == 'recover':
                assert s.rank >= 0
            rank = s.decide_rank(job_map)
            # batch assignment of ranks
            if rank == -1:
                assert len(todo_nodes) != 0
                pending.append(s)
                # assign all ranks at once when every expected node arrived,
                # sorted by host so co-located nodes get adjacent ranks
                if len(pending) == len(todo_nodes):
                    pending.sort(key = lambda x : x.host)
                    for s in pending:
                        rank = todo_nodes.pop(0)
                        if s.jobid != 'NULL':
                            job_map[s.jobid] = rank
                        s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                        if s.wait_accept > 0:
                            wait_conn[rank] = s
                        logging.debug('Recieve %s signal from %s; assign rank %d' % (s.cmd, s.host, s.rank))
                if len(todo_nodes) == 0:
                    logging.info('@tracker All of %d nodes getting started' % nslave)
                    self.start_time = time.time()
            else:
                s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Recieve %s signal from %d' % (s.cmd, s.rank))
                if s.wait_accept > 0:
                    wait_conn[rank] = s
        logging.info('@tracker All nodes finishes job')
        self.end_time = time.time()
        logging.info('@tracker %s secs between node start and job finish' % str(self.end_time - self.start_time))
    def start(self, nslave):
        """Run accept_slaves on a daemon thread."""
        def run():
            self.accept_slaves(nslave)
        self.thread = Thread(target = run, args = ())
        self.thread.setDaemon(True)
        self.thread.start()
    def join(self):
        """Block until the accept loop finishes (poll so Ctrl-C works)."""
        while self.thread.isAlive():
            self.thread.join(100)
class PSTracker:
    """Start the schduler node in PS

    Picks a free port, exports the scheduler environment and runs `cmd` as
    the parameter-server scheduler on a daemon thread. A None `cmd` makes
    every method a no-op.

    NOTE(review): `envs = {}` is a mutable default argument (read-only here,
    so harmless); the port is bound then closed before the scheduler
    rebinds it, which is inherently racy; the exception message below has
    typos ('schduler', 'faild') that are runtime strings and are therefore
    left unchanged in this documentation-only pass.
    """
    def __init__(self, hostIP, cmd, port = 9091, port_end = 9999, envs = {}):
        """
        Starts the PS scheduler
        """
        self.cmd = cmd
        if cmd is None:
            return
        self.hostIP = hostIP
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        max_retry = 100
        for i in range(0, max_retry):
            if i + 1 == max_retry:
                raise Exception('the schduler is faild to bind a port')
            # try random ports until one binds
            port = random.randint(port, port_end)
            try:
                sock.bind(('', port))
                self.port = port
                sock.close()
                break
            except socket.error:
                continue
        env = os.environ.copy()
        env['DMLC_ROLE'] = 'scheduler'
        env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
        env['DMLC_PS_ROOT_PORT'] = str(self.port)
        for k, v in envs.items():
            env[k] = str(v)
        self.thread = Thread(target = (lambda : subprocess.check_call(self.cmd, env=env, shell=True)), args = ())
        self.thread.setDaemon(True)
        self.thread.start()
    def join(self):
        """Block until the scheduler process exits (poll so Ctrl-C works)."""
        if self.cmd is not None:
            while self.thread.isAlive():
                self.thread.join(100)
    def slave_envs(self):
        """Environment variables slaves need to reach the scheduler."""
        if self.cmd is None:
            return {}
        else:
            return {'DMLC_PS_ROOT_URI': self.hostIP,
                    'DMLC_PS_ROOT_PORT': self.port}
def submit(nworker, nserver, fun_submit, hostIP = 'auto', pscmd = None):
    """Submit a DMLC job and run the root (tracker/scheduler) node.

    Parameters
    ----------
    nworker : int
        number of workers
    nserver : int
        number of servers; if 0 the job is submitted as a rabit job,
        otherwise as a parameter-server job
    fun_submit : func
        the function that actually launches the server and worker processes
    hostIP : str, optional
        the host ip of the root node ('auto'/'ip'/'dns' or a literal address)
    pscmd : str, optional
        command used to start the parameter-server scheduler
    """
    # get the root node ip
    if hostIP == 'auto':
        hostIP = 'ip'
    if hostIP == 'dns':
        hostIP = socket.getfqdn()
    elif hostIP == 'ip':
        from socket import gaierror
        try:
            hostIP = socket.gethostbyname(socket.getfqdn())
        except gaierror:
            logging.warn('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
            hostIP = socket.gethostbyname(socket.gethostname())
        if hostIP.startswith("127."):
            # loopback is useless to remote slaves: discover the outbound
            # interface address via a UDP socket
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # doesn't have to be reachable
            s.connect(('10.255.255.255', 0))
            hostIP = s.getsockname()[0]
    envs = {'DMLC_NUM_WORKER' : nworker,
            'DMLC_NUM_SERVER' : nserver}
    # start the root
    if nserver == 0:
        rabit = RabitTracker(hostIP = hostIP, nslave = nworker)
        envs.update(rabit.slave_envs())
        rabit.start(nworker)
    else:
        pserver = PSTracker(hostIP = hostIP, cmd=pscmd, envs = envs)
        envs.update(pserver.slave_envs())
    # start the workers and servers
    fun_submit(nworker, nserver, envs)
    # wait the root finished
    if nserver == 0:
        rabit.join()
    else:
        pserver.join()
def config_logger(args):
    """Configure the root logger from parsed command-line args.

    Honors the optional attributes `log_level` ('INFO'/'DEBUG', defaults to
    'DEBUG' when absent) and `log_file` (log to that file when set). A
    console StreamHandler is always attached, matching the original
    behavior.

    NOTE(review): when no log_file is given, basicConfig already installs a
    console handler, so the explicit addHandler duplicates console output —
    kept for behavioral parity.
    """
    FORMAT = '%(asctime)s %(levelname)s %(message)s'
    level_name = args.log_level if 'log_level' in args else 'DEBUG'
    # getattr resolves the named level safely; the previous code used
    # eval('logging.' + level), which executes arbitrary expressions.
    level = getattr(logging, level_name)
    if 'log_file' not in args or args.log_file is None:
        logging.basicConfig(format=FORMAT, level = level)
    else:
        logging.basicConfig(format=FORMAT, level = level, filename = args.log_file)
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter(FORMAT))
    console.setLevel(level)
    logging.getLogger('').addHandler(console)
| 14,308 | 31.970046 | 113 | py |
ps-lite | ps-lite-master/tracker/dmlc_ssh.py | #!/usr/bin/env python
"""
DMLC submission script by ssh
One need to make sure all slaves machines are ssh-able.
"""
import argparse
import sys
import os
import subprocess
import tracker
import logging
from threading import Thread
class SSHLauncher(object):
    """Launch DMLC worker/server processes on remote hosts over ssh.

    Hosts are read from the given hostfile; jobs are distributed round-robin
    and each remote command inherits selected local environment variables.
    """
    def __init__(self, args, unknown):
        self.args = args
        # full command line: user command plus any unrecognized extra args
        self.cmd = (' '.join(args.command) + ' ' + ' '.join(unknown))
        assert args.hostfile is not None
        with open(args.hostfile) as f:
            hosts = f.readlines()
        assert len(hosts) > 0
        # keep only non-blank, stripped host names
        self.hosts=[]
        for h in hosts:
            if len(h.strip()) > 0:
                self.hosts.append(h.strip())
    def sync_dir(self, local_dir, slave_node, slave_dir):
        """
        sync the working directory from root node into slave node
        """
        remote = slave_node + ':' + slave_dir
        logging.info('rsync %s -> %s', local_dir, remote)
        # TODO uses multithread
        prog = 'rsync -az --rsh="ssh -o StrictHostKeyChecking=no" %s %s' % (
            local_dir, remote)
        subprocess.check_call([prog], shell = True)
    def get_env(self, pass_envs):
        """Build the 'export K=V;' prefix carrying selected local env vars
        plus the tracker-provided ones to the remote shell."""
        envs = []
        # get system envs
        keys = ['LD_LIBRARY_PATH', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
        for k in keys:
            v = os.getenv(k)
            if v is not None:
                envs.append('export ' + k + '=' + v + ';')
        # get ass_envs
        for k, v in pass_envs.items():
            envs.append('export ' + str(k) + '=' + str(v) + ';')
        return (' '.join(envs))
    def submit(self):
        """Return the closure the tracker calls to launch all processes."""
        def ssh_submit(nworker, nserver, pass_envs):
            """
            customized submit script
            """
            # thread func to run the job
            def run(prog):
                subprocess.check_call(prog, shell = True)
            # sync programs if necessary
            local_dir = os.getcwd()+'/'
            working_dir = local_dir
            if self.args.sync_dir is not None:
                working_dir = self.args.sync_dir
            # NOTE(review): the rsync below runs for every host regardless
            # of --sync-dir (only the destination changes) — confirm intent.
            for h in self.hosts:
                self.sync_dir(local_dir, h, working_dir)
            # launch jobs
            for i in range(nworker + nserver):
                pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
                # round-robin placement over the host list
                node = self.hosts[i % len(self.hosts)]
                prog = self.get_env(pass_envs) + ' cd ' + working_dir + '; ' + self.cmd
                prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' \'' + prog + '\''
                thread = Thread(target = run, args=(prog,))
                thread.setDaemon(True)
                thread.start()
        return ssh_submit
    def run(self):
        """Configure logging and hand the job to the tracker."""
        tracker.config_logger(self.args)
        tracker.submit(self.args.num_workers,
                       self.args.num_servers,
                       fun_submit = self.submit(),
                       pscmd = self.cmd)
def main():
    """Parse command-line options and submit the job over ssh."""
    arg_parser = argparse.ArgumentParser(description='DMLC script to submit dmlc job using ssh')
    arg_parser.add_argument('-n', '--num-workers', required=True, type=int,
                            help = 'number of worker nodes to be launched')
    arg_parser.add_argument('-s', '--num-servers', default = 0, type=int,
                            help = 'number of server nodes to be launched')
    arg_parser.add_argument('-H', '--hostfile', type=str,
                            help = 'the hostfile of all slave nodes')
    arg_parser.add_argument('command', nargs='+',
                            help = 'command for dmlc program')
    arg_parser.add_argument('--sync-dir', type=str,
                        help = 'if specificed, it will sync the current \
                        directory into slave machines\'s SYNC_DIR')
    known_args, extra_args = arg_parser.parse_known_args()
    SSHLauncher(known_args, extra_args).run()
if __name__ == '__main__':
main()
| 3,896 | 33.184211 | 92 | py |
ps-lite | ps-lite-master/tracker/dmlc_mpi.py | #!/usr/bin/env python
"""
DMLC submission script, MPI version
"""
import argparse
import sys
import os
import subprocess
import tracker
from threading import Thread
# Module-level CLI: parsed at import time; `args`/`unknown` are read by
# mpi_submit and the submission call at the bottom of this script.
parser = argparse.ArgumentParser(description='DMLC script to submit dmlc job using MPI')
parser.add_argument('-n', '--nworker', required=True, type=int,
                    help = 'number of worker proccess to be launched')
parser.add_argument('-s', '--server-nodes', default = 0, type=int,
                    help = 'number of server nodes to be launched')
parser.add_argument('--log-level', default='INFO', type=str,
                    choices=['INFO', 'DEBUG'],
                    help = 'logging level')
parser.add_argument('--log-file', type=str,
                    help = 'output log to the specific log file')
parser.add_argument('-H', '--hostfile', type=str,
                    help = 'the hostfile of mpi server')
parser.add_argument('command', nargs='+',
                    help = 'command for dmlc program')
parser.add_argument('--host-ip', type=str,
                    help = 'the scheduler ip', default='ip')
args, unknown = parser.parse_known_args()
#
# submission script using MPI
#
def get_mpi_env(envs):
    """get the mpirun command for setting the envornment
    support both openmpi and mpich2

    Detects the MPI flavor by running bare `mpirun` and grepping its
    usage/version text, then renders `envs` as the matching per-variable
    flags ('-x K=V' for Open MPI, '-env K V' for MPICH).

    NOTE(review): the fixed /tmp/mpiver path is world-shared and racy
    between concurrent users; the exception message typo ('unknow') is a
    runtime string and is left unchanged in this documentation-only pass.
    """
    outfile="/tmp/mpiver"
    # bare mpirun prints usage text that names the implementation
    os.system("mpirun 1>/tmp/mpiver 2>/tmp/mpiver")
    with open (outfile, "r") as infile:
        mpi_ver = infile.read()
    cmd = ''
    if 'Open MPI' in mpi_ver:
        for k, v in envs.items():
            cmd += ' -x %s=%s' % (k, str(v))
    elif 'mpich' in mpi_ver:
        for k, v in envs.items():
            cmd += ' -env %s %s' % (k, str(v))
    else:
        raise Exception('unknow mpi version %s' % (mpi_ver))
    return cmd
def mpi_submit(nworker, nserver, pass_envs):
    """
    customized submit script, that submit nslave jobs, each must contain args as parameter
    note this can be a lambda function containing additional parameters in input
    Parameters
       nworker number of slave process to start up
       nserver number of server nodes to start up
       pass_envs enviroment variables to be added to the starting programs

    Reads the module-level `args`/`unknown` from the CLI parse above; each
    mpirun invocation runs on its own daemon thread.
    """
    def run(prog):
        """Thread body: run one mpirun command to completion."""
        subprocess.check_call(prog, shell = True)
    cmd = ''
    if args.hostfile is not None:
        cmd = '--hostfile %s' % (args.hostfile)
    cmd += ' ' + ' '.join(args.command) + ' ' + ' '.join(unknown)
    # start servers
    if nserver > 0:
        pass_envs['DMLC_ROLE'] = 'server'
        prog = 'mpirun -n %d %s %s' % (nserver, get_mpi_env(pass_envs), cmd)
        thread = Thread(target = run, args=(prog,))
        thread.setDaemon(True)
        thread.start()
    # start workers (note: pass_envs is mutated again, after the server
    # command line has already been rendered above)
    if nworker > 0:
        pass_envs['DMLC_ROLE'] = 'worker'
        prog = 'mpirun -n %d %s %s' % (nworker, get_mpi_env(pass_envs), cmd)
        thread = Thread(target = run, args=(prog,))
        thread.setDaemon(True)
        thread.start()
# Script entry: configure logging, then hand the job (with mpi_submit as
# the launcher) to the shared tracker.
tracker.config_logger(args)
tracker.submit(args.nworker, args.server_nodes, fun_submit = mpi_submit,
               hostIP=args.host_ip,
               pscmd=(' '.join(args.command) + ' ' + ' '.join(unknown)))
| 3,173 | 33.5 | 92 | py |
ps-lite | ps-lite-master/tests/lint.py | #!/usr/bin/env python
# pylint: disable=protected-access, unused-variable, locally-disabled, redefined-variable-type
"""Lint helper to generate lint summary of source.
Copyright by Contributors
"""
import codecs
import sys
import re
import os
import cpplint
from cpplint import _cpplint_state
from pylint import epylint
CXX_SUFFIX = set(['cc', 'c', 'cpp', 'h', 'cu', 'hpp'])
PYTHON_SUFFIX = set(['py'])
class LintHelper(object):
    """Class to help runing the lint and records summary

    Drives cpplint for C++ sources and pylint (via epylint) for Python
    sources, accumulating per-file error-category maps for the final
    summary.

    NOTE(review): Python 2 only — uses dict.iteritems and a `print`
    statement below.
    """
    @staticmethod
    def _print_summary_map(strm, result_map, ftype):
        """Print summary of certain result map.

        Returns the number of files in `result_map` that had errors.
        """
        if len(result_map) == 0:
            return 0
        npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
        strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
        for fname, emap in result_map.iteritems():
            if len(emap) == 0:
                continue
            strm.write('%s: %d Errors of %d Categories map=%s\n' % (
                fname, sum(emap.values()), len(emap), str(emap)))
        return len(result_map) - npass
    def __init__(self):
        # per-file {category: count} results, keyed by path
        self.project_name = None
        self.cpp_header_map = {}
        self.cpp_src_map = {}
        self.python_map = {}
        pylint_disable = ['superfluous-parens',
                          'too-many-instance-attributes',
                          'too-few-public-methods']
        # setup pylint
        self.pylint_opts = ['--extension-pkg-whitelist=numpy',
                            '--disable=' + ','.join(pylint_disable)]
        self.pylint_cats = set(['error', 'warning', 'convention', 'refactor'])
        # setup cpp lint
        cpplint_args = ['.', '--extensions=' + (','.join(CXX_SUFFIX))]
        _ = cpplint.ParseArguments(cpplint_args)
        cpplint._SetFilters(','.join(['-build/c++11',
                                      '-build/namespaces',
                                      '-build/include,',
                                      '+build/include_what_you_use',
                                      '+build/include_order']))
        cpplint._SetCountingStyle('toplevel')
        cpplint._line_length = 100
    def process_cpp(self, path, suffix):
        """Process a cpp file."""
        _cpplint_state.ResetErrorCounts()
        cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
        _cpplint_state.PrintErrorCounts()
        errors = _cpplint_state.errors_by_category.copy()
        if suffix == 'h':
            self.cpp_header_map[str(path)] = errors
        else:
            self.cpp_src_map[str(path)] = errors
    def process_python(self, path):
        """Process a python file."""
        (pylint_stdout, pylint_stderr) = epylint.py_run(
            ' '.join([str(path)] + self.pylint_opts), return_std=True)
        emap = {}
        print pylint_stderr.read()
        for line in pylint_stdout:
            sys.stderr.write(line)
            # pylint line ends with '... (category)'; extract the category
            key = line.split(':')[-1].split('(')[0].strip()
            if key not in self.pylint_cats:
                continue
            if key not in emap:
                emap[key] = 1
            else:
                emap[key] += 1
        sys.stderr.write('\n')
        self.python_map[str(path)] = emap
    def print_summary(self, strm):
        """Print summary of lint. Returns the total number of failing files."""
        nerr = 0
        nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
        nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-soruce')
        nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
        if nerr == 0:
            strm.write('All passed!\n')
        else:
            strm.write('%d files failed lint\n' % nerr)
        return nerr
# singleton helper for lint check
_HELPER = LintHelper()
def get_header_guard_dmlc(filename):
    """Get Header Guard Convention for DMLC Projects.
    For headers in include, directly use the path
    For headers in src, use project name plus path
    Examples: with project-name = dmlc
        include/dmlc/timer.h -> DMLC_TIMTER_H_
        src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
    """
    info = cpplint.FileInfo(filename)
    rel_path = info.RepositoryName()
    if rel_path.find('src/') != -1 and _HELPER.project_name is not None:
        # replace everything up to and including 'src' with the project name
        idx = rel_path.find('src/')
        rel_path = _HELPER.project_name + rel_path[idx + 3:]
    else:
        # otherwise strip a leading include/api/wrapper directory, if any
        for top_dir in ['include', 'api', 'wrapper']:
            prefix = top_dir + os.sep
            if rel_path.startswith(prefix):
                rel_path = re.sub('^' + prefix, '', rel_path)
                break
    return re.sub(r'[-./\s]', '_', rel_path).upper() + '_'
cpplint.GetHeaderGuardCPPVariable = get_header_guard_dmlc
def process(fname, allow_type):
    """Dispatch one file to the cpp or python linter based on its suffix.

    Skips editor backup files (names containing '#'), suffixes outside
    `allow_type`, and generated protobuf sources.
    """
    path = str(fname)
    suffix = path.rsplit('.', 1)[-1]
    if '#' in path or suffix not in allow_type:
        return
    is_protobuf = path.endswith(".pb.h") or path.endswith(".pb.cc")
    if suffix in CXX_SUFFIX and not is_protobuf:
        _HELPER.process_cpp(path, suffix)
    if suffix in PYTHON_SUFFIX:
        _HELPER.process_python(path)
def main():
    """Main entry function.

    Usage: lint.py <project-name> <filetype> <paths...>; lints every
    matching file under the given paths and exits nonzero on failures.
    """
    if len(sys.argv) < 3:
        print('Usage: <project-name> <filetype> <list-of-path to traverse>')
        print('\tfiletype can be python/cpp/all')
        exit(-1)
    _HELPER.project_name = sys.argv[1]
    file_type = sys.argv[2]
    # build the set of allowed file suffixes from the requested filetype
    allow_type = []
    if file_type == 'python' or file_type == 'all':
        allow_type += [x for x in PYTHON_SUFFIX]
    if file_type == 'cpp' or file_type == 'all':
        allow_type += [x for x in CXX_SUFFIX]
    allow_type = set(allow_type)
    if os.name != 'nt':
        # wrap stderr so non-ascii lint output cannot crash the run
        sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                               codecs.getreader('utf8'),
                                               codecs.getwriter('utf8'),
                                               'replace')
    # lint files directly, or walk directories recursively
    for path in sys.argv[3:]:
        if os.path.isfile(path):
            process(path, allow_type)
        else:
            for root, dirs, files in os.walk(path):
                for name in files:
                    process(os.path.join(root, name), allow_type)
    nerr = _HELPER.print_summary(sys.stderr)
    sys.exit(nerr > 0)
| 6,474 | 36.212644 | 98 | py |
ps-lite | ps-lite-master/docs/sphinx_util.py | import sys, os, subprocess
# Fetch (or update) the recommonmark checkout next to the docs dir at import
# time, then expose its markdown parser/transform under the names conf.py
# expects.
if not os.path.exists('../recommonmark'):
    subprocess.call('cd ..; git clone https://github.com/tqchen/recommonmark', shell = True)
else:
    subprocess.call('cd ../recommonmark; git pull', shell=True)
sys.path.insert(0, os.path.abspath('../recommonmark/'))
from recommonmark import parser, transform
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
# MarkdownParser.github_doc_root = github_doc_root
def generate_doxygen_xml(app):
    """Run the doxygen make commands

    Sphinx build hook: invokes `doxygen` in the current directory to
    (re)generate the XML that breathe consumes. `app` is the Sphinx
    application object (unused).
    """
    subprocess.call('doxygen')
| 571 | 27.6 | 92 | py |
ps-lite | ps-lite-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# ps-lite documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 20 20:12:23 2016.
#
# Mu: additional changes
# - add breathe into extensions
# - change html theme into sphinx_rtd_theme
# - add sphnix_util.py
# - add .md into source_suffix
# - add setup() at the end
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.md']
# source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ps-lite'
copyright = u'2016, ps-lite developers'
author = u'ps-lite developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ps-litedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ps-lite.tex', u'ps-lite Documentation',
u'ps-lite developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ps-lite', u'ps-lite Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ps-lite', u'ps-lite Documentation',
author, 'ps-lite', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, curr_path)
from sphinx_util import MarkdownParser, AutoStructify, generate_doxygen_xml
MarkdownParser.github_doc_root = 'https://github.com/dmlc/ps-lite/tree/master/'
source_parsers = {
'.md': MarkdownParser,
'.Rmd': MarkdownParser,
}
breathe_projects = {'ps-lite' : 'xml/'}
breathe_default_project = 'ps-lite'
doc_root = 'http://dmlc.ml'
def setup(app):
    """Sphinx extension hook: wire up doxygen generation and recommonmark.

    Registers ``generate_doxygen_xml`` (imported from ``sphinx_util`` above)
    to run when the builder is initialised, and configures recommonmark's
    AutoStructify transform so relative URLs in markdown sources resolve
    against ``doc_root``.
    """
    # Regenerate the doxygen XML before each build so breathe sees it.
    app.connect("builder-inited", generate_doxygen_xml)
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: doc_root + url,
    }, True)
    app.add_transform(AutoStructify)
| 10,101 | 30.968354 | 80 | py |
SLIT | SLIT-master/setup.py | from setuptools import setup
setup(name='SLIT',
version='0.1',
description='Code for colour lens/source separation and lensed source reconstruction',
author='Remy Joseph, Frederic Courbin, Jean-Luc Starck',
author_email='remy.joseph@epfl.ch',
packages=['SLIT'],
zip_safe=False)
| 315 | 30.6 | 92 | py |
SLIT | SLIT-master/Tests/Result_slope.py | import numpy as np
import matplotlib.pyplot as plt
import SLIT
import pyfits as pf
import matplotlib.cm as cm
import os
import glob
nsim = 11
ranges = np.array([1.9,1.95,2.0,2.025,2.05])#np.linspace(0,1,11)
Truth = pf.open('IMG2.fits')[0].data
sigma = 0.00119
Sources = 0
FSs = 0
#thetas = np.zeros((nsim, ranges.size))
Reses = np.zeros((nsim, ranges.size))
SDRs = np.zeros((nsim, ranges.size))
shifts = np.zeros((nsim, ranges.size))
L1s = np.zeros((nsim, ranges.size))
Sources = 0
Images = 0
FSs = 0
x = 0
for i in ranges:
y = 0
Images = 0
Sources = 0
FSs = 0
for sim in range(nsim):
files = glob.glob('../Results_slope/Source_'+str(sim)+'_'+str(i)+'.fits')[0]
Source = pf.open(files)[0].data
FS = pf.open('../Results_slope/Lensed_source'+files[23:])[0].data
Image = pf.open('../Results_slope/Image'+files[23:])[0].data
FSs+=FS
Sources += Source
Images += Image
Reses[y,x] = (np.std(Image-FS)/sigma)
SDRs[y,x] = 10*np.log10(np.sqrt(np.sum(Truth[Truth!=0]**2))/np.sqrt(np.sum((Source[Truth!=0]-Truth[Truth!=0])**2)))
L1s[y,x] = np.sum(np.abs(SLIT.tools.wave_transform(Source, 6)))
log = files.split('_')
shifts[y,x] = np.float(i)
y+=1
## plt.figure(10)
## plt.imshow(Images-FSs+np.random.randn(FSs.shape[0],FSs.shape[1])*sigma*nsim, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Residuals'+str(i))
## plt.figure(30)
## plt.imshow(Images, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Images'+str(i))
## plt.figure(20)
## plt.imshow(Sources, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Source'+str(i))
## plt.figure(40)
## plt.imshow(Sources-Truth*nsim, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Source Residuals'+str(i))
## plt.show()
x+=1
mean_SDRs = np.mean(SDRs,axis = 0)
sigma_SDRs = np.std(SDRs,axis = 0)
mean_L1s = np.mean(L1s,axis = 0)
sigma_L1s = np.std(L1s,axis = 0)
mean_Reses = np.mean(Reses,axis = 0)
sigma_Reses = np.std(Reses,axis = 0)
plt.figure(0)
plt.errorbar(ranges, mean_SDRs, yerr = sigma_SDRs)
plt.xlabel('Shifts')
plt.ylabel('SDR')
#plt.axis([-0.1,ranges.max()+0.1,mean_SDRs.min()-2*sigma_SDRs[mean_SDRs==mean_SDRs.min()],mean_SDRs.max()+2*sigma_SDRs[mean_SDRs==mean_SDRs.max()]])
plt.figure(1)
plt.errorbar(ranges, mean_Reses, yerr = sigma_Reses)
#plt.axis([-0.1,ranges.max()+0.1,mean_Reses.min()-2*sigma_Reses[mean_Reses==mean_Reses.min()],mean_Reses.max()+2*sigma_Reses[mean_Reses==mean_Reses.max()]])
plt.xlabel('Shifts')
plt.ylabel('Residuals')
plt.figure(2)
plt.errorbar(ranges, mean_L1s, yerr = sigma_L1s)
#plt.axis([-0.1,ranges.max()+0.1,mean_Reses.min()-2*sigma_Reses[mean_Reses==mean_Reses.min()],mean_Reses.max()+2*sigma_Reses[mean_Reses==mean_Reses.max()]])
plt.xlabel('Shifts')
plt.ylabel('L1')
#plt.figure(2)
#sc0 = plt.scatter(thetas.flatten(), SDRs.flatten(), c = np.array(shifts.flatten()))
#plt.colorbar(sc0)
#plt.xlabel('theta')
#plt.ylabel('SDR')
#plt.figure(3)
#sc = plt.scatter(thetas.flatten(), Reses.flatten(), c = np.array(shifts.flatten()))
#plt.colorbar(sc)
#plt.xlabel('theta')
#plt.ylabel('Residuals')
plt.show()
| 3,313 | 30.865385 | 156 | py |
SLIT | SLIT-master/Tests/Launch_Test.py | import Test_center as tc
import sys
import numpy as np
variable = sys.argv[1]
shift = sys.argv[2]
if variable == 'center':
tc.test_center(np.float(shift))
if variable == 'slope':
tc.test_slope(np.float(shift))
| 224 | 13.0625 | 35 | py |
SLIT | SLIT-master/Tests/gaussian.py | import numpy as np
import scipy.misc as spm
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def gaussian(n1,n2,x0,y0,A,e1,e2,alpha):
    """Render an elliptical 2-D Gaussian profile.

    INPUTS:
      n1,n2: size of the output image
      x0,y0: centroid of the gaussian profile (pixel coordinates)
      A: normalisation; the peak value is A/(2*pi*sqrt(e1*e2))
      e1,e2: ellipticity (semi-axis scales) of the profile
      alpha: inclination of the profile (radians)
    OUTPUTS:
      Img: (n1, n2) array containing the gaussian profile

    Fixes over the original implementation: the per-pixel loop iterated over
    ``np.int_(np.linspace(0, n1*n2-1., n1*n2-1.))`` which raises a TypeError
    on modern numpy (float ``num``) and, when it ran, produced duplicate and
    missing pixel indices — skipped pixels kept AA=0 and were rendered at the
    peak value. The quadratic form is now evaluated for every pixel at once,
    and the matrix inverse is computed once instead of per pixel.
    """
    # Amplitude normalisation (same convention as before).
    ampli = A / (2. * np.pi * np.sqrt(e1 * e2))
    # Rotation matrix and quadrupole (covariance-like) matrix:
    # matA = R^T diag(e1^2, e2^2) R, same construction as the original.
    mat_rot = np.array([[np.cos(alpha), np.sin(alpha)],
                        [-np.sin(alpha), np.cos(alpha)]])
    matell = np.array([[e1 * e1, 0.], [0., e2 * e2]])
    matA = mat_rot.T.dot(matell).dot(mat_rot)
    invA = np.linalg.inv(matA)  # loop-invariant: computed once
    # Pixel coordinate grids, centred on (x0, y0). np.indices matches the
    # row-major ordering the original obtained via np.where.
    ii, jj = np.indices((n1, n2))
    dx = ii - x0
    dy = jj - y0
    # Quadratic form d^T invA d evaluated for every pixel simultaneously.
    AA = (invA[0, 0] * dx * dx
          + (invA[0, 1] + invA[1, 0]) * dx * dy
          + invA[1, 1] * dy * dy)
    return ampli * np.exp(-0.5 * AA)
def moffat(n1,n2,x0,y0,A,e1,e2,alpha,beta):
    """Render an elliptical 2-D Moffat-like profile.

    INPUTS:
      n1,n2: size of the output image
      x0,y0: centroid of the profile (pixel coordinates)
      A: normalisation; the peak value is A/(2*pi*sqrt(e1*e2))
      e1,e2: ellipticity (semi-axis scales) of the profile
      alpha: inclination of the profile (radians)
      beta: Moffat slope parameter
    OUTPUTS:
      Img: (n1, n2) array containing the profile

    Fixes over the original implementation: the per-pixel loop skipped and
    duplicated indices (``np.int_`` of a mismatched ``linspace``), leaving
    skipped pixels at the peak value; the matrix inverse was recomputed in
    every iteration. All pixels are now evaluated vectorially.
    """
    ampli = A / (2. * np.pi * np.sqrt(e1 * e2))
    # matA = R^T diag(1/e1^2, 1/e2^2) R, as in the original construction.
    mat_rot = np.array([[np.cos(alpha), np.sin(alpha)],
                        [-np.sin(alpha), np.cos(alpha)]])
    matell = np.array([[1. / (e1 * e1), 0.], [0., 1. / (e2 * e2)]])
    matA = mat_rot.T.dot(matell).dot(mat_rot)
    invA = np.linalg.inv(matA)  # loop-invariant: computed once
    ii, jj = np.indices((n1, n2))
    dx = ii - x0
    dy = jj - y0
    # Quadratic form d^T invA d for every pixel.
    AA = (invA[0, 0] * dx * dx
          + (invA[0, 1] + invA[1, 0]) * dx * dy
          + invA[1, 1] * dy * dy)
    # NOTE(review): the radial term enters squared, (1 + AA**2)**(-beta);
    # a textbook Moffat profile would use (1 + AA)**(-beta). Preserved
    # as-is from the original — confirm intent.
    return ampli * (1 + AA ** 2) ** (-beta)
def sersic(n1,n2,x0,y0,A,e1,e2,alpha,n):
    """Render an elliptical 2-D Sersic-like profile.

    INPUTS:
      n1,n2: size of the output image
      x0,y0: centroid of the profile (pixel coordinates)
      A: normalisation; the peak value is A/(2*pi*sqrt(e1*e2))
      e1,e2: ellipticity (semi-axis scales) of the profile
      alpha: inclination of the profile (radians)
      n: Sersic index
    OUTPUTS:
      Img: (n1, n2) array containing the profile

    Fixes over the original implementation: the per-pixel loop skipped and
    duplicated indices (``np.int_`` of a mismatched ``linspace``), leaving
    skipped pixels at the peak value; the matrix inverse was recomputed in
    every iteration; ``1/n`` is written ``1./n`` so the exponent cannot hit
    integer division. All pixels are now evaluated vectorially.
    """
    ampli = A / (2. * np.pi * np.sqrt(e1 * e2))
    # matA = R^T diag(1/e1^2, 1/e2^2) R, as in the original construction.
    mat_rot = np.array([[np.cos(alpha), np.sin(alpha)],
                        [-np.sin(alpha), np.cos(alpha)]])
    matell = np.array([[1. / (e1 * e1), 0.], [0., 1. / (e2 * e2)]])
    matA = mat_rot.T.dot(matell).dot(mat_rot)
    invA = np.linalg.inv(matA)  # loop-invariant: computed once
    ii, jj = np.indices((n1, n2))
    dx = ii - x0
    dy = jj - y0
    # Quadratic form d^T invA d for every pixel (non-negative, so the
    # fractional exponent below is always well defined).
    AA = (invA[0, 0] * dx * dx
          + (invA[0, 1] + invA[1, 0]) * dx * dy
          + invA[1, 1] * dy * dy)
    return ampli * np.exp(-AA ** (1. / n))
def add_noise(img, mean, sigma):
    """Return ``img`` plus i.i.d. Gaussian noise.

    INPUTS:
      img: 2-D image array
      mean: mean of the noise
      sigma: standard deviation of the noise
    OUTPUTS:
      imfinal: image with one Gaussian deviate added per pixel

    Fixes over the original implementation, which could not run at all:
    it referenced the undefined name ``numpy`` (NameError), misspelled
    ``multivariate_normal`` as ``multovariate_normal``, hard-coded a
    128x128 noise field regardless of the image size, and silently
    ignored ``sigma``.
    """
    shp = np.shape(img)
    # Scale the covariance by sigma**2 so the requested noise level is
    # actually applied; draw one deviate per pixel of the input image.
    cov = np.identity(2) * sigma ** 2
    noise = np.random.multivariate_normal([mean, mean], cov, shp)
    imfinal = img + noise[:, :, 0]
    return imfinal
| 4,915 | 26.463687 | 78 | py |
SLIT | SLIT-master/Tests/Test_center.py | import pyfits as pf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
import SLIT
import gaussian as gs
import time
from scipy import signal as scp
import warnings
warnings.simplefilter("ignore")
#Example of a run of the SLIT algorithm on simulated images.
#Here the first part of the file shows how simulations are generated.
#For users intereseted only in seeing the code run, have a look at the running SLIT section.
#The command line that builds the Fkappa operator is also of outmost importance.
#Source light profile
# Module-level simulation parameters shared by test_center / test_slope.
# Ground-truth source image loaded at import time.
newsource = pf.open('IMG2.fits')[0].data#('./Files/source.fits')[0].data
##N1,N2 are the numbers of pixels in the image plane.
nt1= 100
nt2 = 100
#Size ratio of the source to image number of pixels
#PSF
# Gaussian PSF with FWHM of 2 pixels (sigma = 2 / (2*sqrt(2*ln 2))),
# trimmed and renormalised to unit sum.
PSF0 = gs.gaussian(64,64,32,32,1,2/(2.*(2*np.log(2))**0.5),2/(2.*(2*np.log(2))**0.5),0)
PSF0[-2:,-2:] = 0
PSF = PSF0[1:,1:]
PSF = PSF/np.sum(PSF)
## Lens mass distribution.
Re = 1.3/0.05
xc = 0.8
q = 0.99
gamma_true = 2.0
theta = 25.2
extra = 500
######## SIS parameters
xsie, ysie = 50.,50.
n1sie, n2sie = 100,100
# NOTE(review): this overrides the Re = 1.3/0.05 assignment above; only
# Re = 15 is ever used by the test functions — confirm which is intended.
Re = 15
sizeSIE = 2
# Reconstruction settings: realisations per run, detection threshold,
# iteration budget, and starlet decomposition depth.
nsim = 50
kmax = 5
niter =100
levels = [0]
lvl = 4
def test_center(shift):
    """Simulate SIE lenses whose centre is offset by ``shift`` pixels in a
    random direction, run SLIT on each realisation, and write the images,
    reconstructed sources and lensed sources to ../Results_center/.

    Uses the module-level simulation constants (PSF, newsource, nsim, ...).
    Returns 0.
    """
    # NOTE(review): ``betata`` and ``thetata`` are not defined anywhere in
    # this module (the module defines ``theta`` and ``gamma_true``), so this
    # call raises NameError as written — confirm the intended parameters.
    # Also note ``xsie`` is used for both coordinates here although ``ysie``
    # exists.
    kappa_true = SLIT.Lens.SIE(extra/2.+xsie,extra/2.+xsie,n1sie+extra,n1sie+extra,Re,betata,q,xc, thetata)
    #plt.imshow(kappa_true); plt.show()#
    #SLIT.Lens.SIS(xsis+extra/2,ysis+extra/2,n1sis+extra,n2sis+extra,Re)
    alphax_SIE_true, alphay_SIE_true = SLIT.Lens.alpha_def(kappa_true, n1sie,n2sie, extra)
    #Mapping between lens and source IMPORTANT
    Fkappa_true = SLIT.Lens.F(kappa_true, n1sie,n2sie, sizeSIE, extra = extra)
    # Save the true deflection fields for later inspection.
    # NOTE(review): ``clobber`` is the legacy pyfits keyword (astropy renamed
    # it to ``overwrite``).
    hdus = pf.PrimaryHDU(alphax_SIE_true)
    lists = pf.HDUList([hdus])
    lists.writeto('../Results_center/alphax_SIE_true.fits', clobber=True)
    hdus = pf.PrimaryHDU(alphay_SIE_true)
    lists = pf.HDUList([hdus])
    lists.writeto('../Results_center/alphay_SIE_true.fits', clobber=True)
    #Generation of lensed source
    I2 = SLIT.Lens.source_to_image(newsource, n1sie ,n2sie , Fkappa_true)
    #Noise levels
    SNR = 100
    sigma = np.sqrt(np.sum(I2**2)/SNR/(nt1*nt2*sizeSIE**2))
    print('sigma: ',sigma)
    #Convolution by the PSF and generation of the final image
    I2 = scp.fftconvolve(I2, PSF, mode = 'same')
    print(shift)
    for i in range(nsim):
        # Draw a random direction and offset the lens centre by ``shift``.
        theta = np.random.rand(1)[0]*np.pi
        x = shift*np.cos(theta)
        y = shift*np.sin(theta)
        kappa = SLIT.Lens.SIE(xsie+x+extra/2,ysie+y+extra/2,n1sie+extra,n2sie+extra,Re,betata,q,xc, thetata)
        alphax_SIE, alphay_SIE = SLIT.Lens.alpha_def(kappa, n1sie,n2sie, extra)
        #Mapping between lens and source IMPORTANT
        Fkappa = SLIT.Lens.F(kappa, nt1,nt2, sizeSIE, extra = extra)
        #Final simulated image
        Image = I2+np.random.randn(n1sie,n2sie)*sigma
        hdus = pf.PrimaryHDU(Image)
        lists = pf.HDUList([hdus])
        lists.writeto('../Results_center/Image_'+str(i)+'_'+str(shift)+'_'+str(theta)+'.fits', clobber=True)
        #Running SLIT
        S,FS = SLIT.Solve.SLIT(Image, Fkappa, kmax, niter, sizeSIE, PSF, 0, scheme = 'FISTA', lvl = lvl)
        hdus = pf.PrimaryHDU(S)
        lists = pf.HDUList([hdus])
        lists.writeto('../Results_center/Source_'+str(i)+'_'+str(shift)+'_'+str(theta)+'.fits', clobber=True)
        hdus = pf.PrimaryHDU(FS)
        lists = pf.HDUList([hdus])
        lists.writeto('../Results_center/Lensed_source_'+str(i)+'_'+str(shift)+'_'+str(theta)+'.fits', clobber=True)
    return 0
def test_slope(gamma):
    """Simulate a power-law lens with true slope ``gamma_true`` but invert
    each realisation assuming slope ``gamma``; run SLIT on every noisy
    realisation and write the images, reconstructed sources and lensed
    sources to ../Results_slope/.

    Uses the module-level simulation constants (PSF, newsource, nsim, ...).
    Returns 0.
    """
    kappa_true = SLIT.Lens.Power_law(xsie+extra/2,ysie+extra/2,n1sie+extra,n2sie+extra,Re, theta, q, gamma_true,1.)
    alphax_SIE_true, alphay_SIE_true = SLIT.Lens.alpha_def(kappa_true, n1sie,n2sie, extra)
    #Mapping between lens and source IMPORTANT
    Fkappa_true = SLIT.Lens.F(kappa_true, n1sie,n2sie, sizeSIE, extra = extra)
    # Save the true deflection fields for later inspection.
    # NOTE(review): ``clobber`` is the legacy pyfits keyword (astropy renamed
    # it to ``overwrite``).
    hdus = pf.PrimaryHDU(alphax_SIE_true)
    lists = pf.HDUList([hdus])
    lists.writeto('../Results_slope/alphax_PL_true.fits', clobber=True)
    hdus = pf.PrimaryHDU(alphay_SIE_true)
    lists = pf.HDUList([hdus])
    lists.writeto('../Results_slope/alphay_PL_true.fits', clobber=True)
    #Generation of lensed source
    I2 = SLIT.Lens.source_to_image(newsource, n1sie ,n2sie , Fkappa_true)
    #Noise levels
    SNR = 100
    sigma = np.sqrt(np.sum(I2**2)/SNR/(nt1*nt2*sizeSIE**2))
    print('sigma: ',sigma)
    #Convolution by the PSF and generation of the final image
    I2 = scp.fftconvolve(I2, PSF, mode = 'same')
    for i in range(nsim):
        # Lens model used for the inversion: same geometry, slope ``gamma``.
        kappa = SLIT.Lens.Power_law(xsie+extra/2,ysie+extra/2,n1sie+extra,n2sie+extra,Re, theta, q, gamma,1.)
        alphax_SIE, alphay_SIE = SLIT.Lens.alpha_def(kappa, n1sie,n2sie, extra)
        #Mapping between lens and source IMPORTANT
        Fkappa = SLIT.Lens.F(kappa, nt1,nt2, sizeSIE, extra = extra)
        #Final simulated image
        Image = I2+np.random.randn(n1sie,n2sie)*sigma
        hdus = pf.PrimaryHDU(Image)
        lists = pf.HDUList([hdus])
        lists.writeto('../Results_slope/Image_'+str(i)+'_'+str(gamma)+'.fits', clobber=True)
        #Running SLIT
        S,FS = SLIT.Solve.SLIT(Image, Fkappa, kmax, niter, sizeSIE, PSF, 0, scheme = 'FISTA', lvl = lvl)
        hdus = pf.PrimaryHDU(S)
        lists = pf.HDUList([hdus])
        lists.writeto('../Results_slope/Source_'+str(i)+'_'+str(gamma)+'.fits', clobber=True)
        hdus = pf.PrimaryHDU(FS)
        lists = pf.HDUList([hdus])
        lists.writeto('../Results_slope/Lensed_source_'+str(i)+'_'+str(gamma)+'.fits', clobber=True)
    return 0
| 5,537 | 32.97546 | 116 | py |
SLIT | SLIT-master/Tests/Results_center.py | import numpy as np
import matplotlib.pyplot as plt
import SLIT
import pyfits as pf
import matplotlib.cm as cm
import os
import glob
nsim =49
ranges = np.array([0.0,0.1,0.2,0.3,0.4,0.5])#np.linspace(0,1,11)
Truth = pf.open('IMG2.fits')[0].data
sigma = 0.00119
Sources = 0
FSs = 0
thetas = np.zeros((nsim, ranges.size))
Reses = np.zeros((nsim, ranges.size))
SDRs = np.zeros((nsim, ranges.size))
shifts = np.zeros((nsim, ranges.size))
L1s = np.zeros((nsim, ranges.size))
xcs = np.zeros((nsim, ranges.size))
ycs = np.zeros((nsim, ranges.size))
Sources = 0
Images = 0
FSs = 0
x = 0
for i in ranges:
y = 0
Images = 0
Sources = 0
FSs = 0
for sim in range(nsim):
files = glob.glob('../Results_center/Source_'+str(sim)+'_'+str(i)+'_*.fits')[0]
# print(files)
Source = pf.open(files)[0].data
FS = pf.open('../Results_center/Lensed_source'+files[24:])[0].data
Image = pf.open('../Results_center/Image'+files[24:])[0].data
FSs+=FS
Sources += Source
Images += Image
Reses[y,x] = (np.std(Image-FS)/sigma)
SDRs[y,x] = 10*np.log10(np.sqrt(np.sum(Truth[Truth!=0]**2))/np.sqrt(np.sum((Source[Truth!=0]-Truth[Truth!=0])**2)))
L1s[y,x] = np.sum(np.abs(SLIT.tools.wave_transform(Source, 6)))
log = files.split('_')
xcs[y,x] = i*np.cos(np.float(log[4][:6]))
ycs[y,x] = i*np.sin(np.float(log[4][:6]))
thetas[y,x] = np.float(np.float(log[4][:6]))*180./np.pi
shifts[y,x] = np.float(i)
y+=1
## plt.figure(10)
## plt.imshow(Images-FSs+np.random.randn(FSs.shape[0],FSs.shape[1])*sigma*nsim, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Residuals'+str(i))
## plt.figure(30)
## plt.imshow(Images, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Images'+str(i))
## plt.figure(20)
## plt.imshow(Sources, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Source'+str(i))
## plt.figure(40)
## plt.imshow(Sources-Truth*nsim, cmap = cm.gist_stern, interpolation = 'nearest')
## plt.colorbar()
## plt.title('Source Residuals'+str(i))
## plt.show()
x+=1
mean_SDRs = np.mean(SDRs,axis = 0)
sigma_SDRs = np.std(SDRs,axis = 0)
mean_L1s = np.mean(L1s,axis = 0)
sigma_L1s = np.std(L1s,axis = 0)
mean_Reses = np.mean(Reses,axis = 0)
sigma_Reses = np.std(Reses,axis = 0)
plt.figure(-1)
sc1 = plt.scatter(xcs.flatten(), ycs.flatten(), c = SDRs.flatten())
plt.colorbar(sc1)
plt.figure(0)
plt.errorbar(ranges, mean_SDRs, yerr = sigma_SDRs)
plt.xlabel('Shifts')
plt.ylabel('SDR')
plt.axis([-0.1,ranges.max()+0.1,mean_SDRs.min()-2*sigma_SDRs[mean_SDRs==mean_SDRs.min()],mean_SDRs.max()+2*sigma_SDRs[mean_SDRs==mean_SDRs.max()]])
plt.figure(1)
plt.errorbar(ranges, mean_Reses, yerr = sigma_Reses)
plt.axis([-0.1,ranges.max()+0.1,mean_Reses.min()-2*sigma_Reses[mean_Reses==mean_Reses.min()],mean_Reses.max()+2*sigma_Reses[mean_Reses==mean_Reses.max()]])
plt.xlabel('Shifts')
plt.ylabel('Residuals')
plt.figure(2)
plt.errorbar(ranges, mean_L1s, yerr = sigma_L1s)
plt.xlabel('Shifts')
plt.ylabel('L1')
plt.figure(3)
sc0 = plt.scatter(thetas.flatten(), SDRs.flatten(), c = np.array(shifts.flatten()))
plt.colorbar(sc0)
plt.xlabel('theta')
plt.ylabel('SDR')
plt.figure(4)
sc = plt.scatter(thetas.flatten(), Reses.flatten(), c = np.array(shifts.flatten()))
plt.colorbar(sc)
plt.xlabel('theta')
plt.ylabel('Residuals')
plt.show()
plt.close()
| 3,528 | 30.508929 | 155 | py |
SLIT | SLIT-master/SLIT/Solve.py | #from __future__ import division
import wave_transform as mw
import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
import matplotlib.cm as cm
from scipy import signal as scp
import scipy.ndimage.filters as med
import MuSCADeT as wine
from numpy import linalg as LA
import multiprocess as mtp
from pathos.multiprocessing import ProcessingPool as Pool
import Lens
import warnings
import tools
warnings.simplefilter("ignore")
##SLIT: Sparse Lens Inversion Technique
def SLIT(Y, Fkappa, kmax, niter, size, PSF, PSFconj, S0 = [0], levels = [0], scheme = 'FB',
         mask = [0], lvl = 0, weightS = 1, noise = 'gaussian', tau = 0, verbosity = 0):
    ##DESCRIPTION:
    ##    Function that estimates the source light profile from an image of a lensed source given the mass density profile.
    ##
    ##INPUTS:
    ##  -img: a 2-D image of a lensed source given as n1xn2 numpy array.
    ##  -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
    ##          using tools from SLIT.Lens
    ##  -kmax: the detection threshold in units of noise levels. We usualy set this value to 5 to get a 5 sigma
    ##          detection threshold.
    ##  -niter: maximal number of iterations of the algorithm.
    ##  -size: resoluution factor between lens and source grids such thathe size of the output source
    ##          will be n1sizexn2size
    ##  -PSF: the point spread function of the observation provided as a 2D array.
    ##  -PSFconj: The conjugate of the PSF. Usually computed via np.real(np.fft.ifft2(np.conjugate(np.fft.fft2(PSF0[:-1,:-1]))))
    ##          butthe user has to make sure that the conjugate is well centered.
    ##
    ##OPTIONS:
    ##  -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
    ##          If not provided, the routine will compute the levels and save them in a fits file 'Noise_levels.fits'
    ##          so that they can be used at a later time. This option allows to save time when running the same
    ##          experiment several times.
    ##  -mask: an array of zeros and one with size ns1xns2. The zeros will stand for masked data.
    ##
    ##OUTPUTS:
    ##  -S: the source light profile.
    ##  -FS: the lensed version of the estimated source light profile
    ##
    ##EXAMPLE:
    ##  S,FS = SLIT(img, Fkappa, 5, 100, 1, PSF, PSFconj)
    # NOTE(review): S0, levels and mask use mutable list defaults; they are
    # only tested via np.sum(...) == 0 and rebound, so this works, but it is
    # fragile.
    n1,n2 = np.shape(Y)
    # NOTE(review): the PSFconj argument is immediately overwritten with
    # PSF.T here, so the value passed by the caller is ignored — confirm
    # this is intended.
    PSFconj = PSF.T
    #Size of the source
    ns1,ns2 = n1*size, n2*size
    #Number of starlet scales in source plane
    # NOTE(review): np.int (and np.float) were removed in numpy >= 1.24;
    # plain int() would be needed on modern numpy.
    if lvl ==0:
        lvl = np.int(np.log2(ns2))
    else:
        lvl = np.min([lvl,np.int(np.log2(ns2))])
    lvlg = np.int(np.log2(n2))
    #Masking if required
    if np.sum(mask) == 0:
        mask = np.ones((n1,n2))
    #Noise in image plane
    if noise == 'gaussian':
        print('noise statistic is gaussain')
        sigma0 = tools.MAD(Y)
        print('sigma: ', sigma0)
    if noise == 'poisson':
        print('noise statistic is poisson')
        sigma0 = tools.MAD_poisson(Y,tau)
    if (noise == 'G+P') or (noise == 'P+G'):
        print('noise statistic is poisson and gaussain mixture')
        sigma0 = np.sqrt(tools.MAD_poisson(Y,tau, lvlg)**2+tools.MAD(Y)**2)
        plt.imshow(sigma0); plt.colorbar(); plt.show()
    #Mapping of an all-at-one image to source plane
    lensed = lens_one(Fkappa, n1,n2, size)
    #estimation of the frame of the image in source plane
    # NOTE(review): ``lensed/lensed`` divides by zero outside the lensed
    # support (runtime warning), and ``supp`` is overwritten with the scalar
    # 1 two lines below, disabling the support mask computed here — confirm
    # whether the mask was meant to stay active.
    supp = np.zeros((lvl,lensed.shape[0],lensed.shape[1]))
    supp[:,lensed/lensed ==1] =1
    supp = 1
    #Useful functions
    # Lensing, convolution and starlet operators wrapped as closures over
    # the local configuration.
    def Finv_apply(I):
        return Lens.image_to_source(I, size, Fkappa, lensed = lensed)
    def Lens_op2(I):
        return Lens.image_to_source(I, size, Fkappa, lensed = lensed, square = 1)
    def F_apply(Si):
        return Lens.source_to_image(Si, n1, n2,Fkappa)
    def PSF_apply(i):
        return scp.fftconvolve(i,PSF,mode = 'same')
    def PSFT_apply(ii):
        return scp.fftconvolve(ii,PSFconj,mode = 'same')
    def transform(x):
        return tools.wave_transform(x, lvl, newwave = 1)
    def inverse(x):
        return tools.iuwt(x)
    #Forward operator
    def F_op(X):
        return PSF_apply(F_apply(X))
    #Inverse operator
    def I_op(X):
        return Finv_apply(PSFT_apply(X))
    #Forward operator
    def FW_op(X):
        return PSF_apply(F_apply(inverse(X)))
    #Inverse operator
    def IW_op(X):
        return transform(Finv_apply(PSFT_apply(X)))
    #Regularisation (Backward term)
    # NOTE(review): only reg1 and reg_plus are used below; reg0, reg00,
    # reg_supp and reg_filter appear to be dead code here.
    def reg0(X):
        return tools.Hard(X, levels, (ks), supp=supp)
    def reg00(X):
        return tools.Hard_Threshold(X, transform, inverse,levels, (ks), M = [0], supp=supp)
    def reg1(X):
        return tools.Soft(X, levels*weightS, kmax, supp=supp, Kill = 0)
    def reg_plus(X):
        Xreg = np.copy(X)
        Xreg[X<0] = 0
        return Xreg
    def reg_supp(X):
        X[X < 0] = 0
        return X*supp
    def reg_filter(X):
        return tools.mr_filter(X,levels,ks,10,transform, inverse, I_op(sigma0*np.ones((n1,n2))), lvl = lvl, supp = supp)
    #Noise simulations to estimate noise levels in source plane
    if np.sum(levels)==0:
        print('Calculating noise levels')
        #levels = simulate_noise(n1,n2, sigma0, size, I_op, transform, lvl)
        levels = level_source(n1,n2,sigma0,size,PSFconj, Lens_op2, lensed, lvl)
        #Saves levels
        hdus = pf.PrimaryHDU(levels)
        lists = pf.HDUList([hdus])
        lists.writeto('Noise_levels.fits', clobber=True)
    # NOTE(review): the ``sigma`` parameter is ignored — levels are always
    # recomputed from ``sigma0`` — confirm whether ``sigma`` was meant to be
    # forwarded to level_source.
    def mk_levels(sigma):
        return level_source(n1,n2,sigma0,size,PSFconj, Lens_op2, lensed, lvl)
    ##Compute spectral norms
    opwave_norm = spectralNorm(n1,n2,20,1e-10,IW_op,FW_op)
    op_norm = spectralNorm(ns1, ns2, 20, 1e-10, F_op, I_op)
    wave_norm = spectralNorm(ns1,ns2,20,1e-10,transform,inverse)
    # Step sizes: Vu primal-dual needs (mu, tau); FB/FISTA only need mu.
    if scheme == 'Vu':
        mu = 1.
        tau = 1./(mu*wave_norm**2+0.5*op_norm)
        if verbosity == 1:
            print(mu,tau)
    else:
        mu = .5/(opwave_norm)
        if verbosity == 1:
            print(mu)
    # FISTA runs twice: the second pass reuses the reweighting computed at
    # the end of the first pass (see weightS update below).
    if (scheme == 'FISTA'):
        repeat = 2
    elif (scheme == 'Vu'):
        repeat = 1
    else:
        repeat = 1
    #Initialisation
    Res1= []
    for jr in range(repeat):
        # Initial threshold ks0 set just below the maximum significant
        # coefficient, then decreased towards kmax over the iterations.
        trans = (transform(I_op(Y))/levels)*supp
        #trans[:,lensed==0] = 0
        trans[levels==0] =0
        ks0 = np.max(trans)*0.9
        print(ks0)
        ks=np.copy(ks0)
        steps = (ks0-kmax)/(niter-10.)
        karg = np.log(kmax/ks0)/(niter-10.)
        i = 0
        ts = 1
        csi = 0
        M = [0]
        Res1= []
        Res2 = []
        # NOTE(review): the random draw is multiplied by zero, so S starts
        # at exactly zero unless S0 is provided.
        if np.sum(S0) == 0:
            S = np.random.randn(ns1, ns2) * np.median(sigma0)*0
        else:
            S = S0
        Snew = S
        alpha =transform(S)
        alphanew = np.copy(alpha)
        points = 0
        while i < niter:
            if scheme == 'FB':
                print('FB ', i)
                # Exponentially decaying threshold, floored at kmax.
                ks = ks0*np.exp(i*karg)
                ks = np.max([ks, kmax])
                S = np.copy(Snew)
                Snew = tools.Forward_Backward(Y, S, F_op, I_op, transform, inverse, mu, reg1, pos = 1)
                S[S<0] = 0
                FS = F_op(Snew)*mask
                # NOTE(review): ``and`` binds tighter than ``or``, so this
                # condition reads (noise=='G+P') or ((noise=='P+G') and i<10)
                # — parentheses around the ``or`` are probably missing.
                if (noise == 'G+P') or (noise == 'P+G') and (i<10):
                    sigma = (tools.MAD(Y)+np.sqrt(FS/tau))
                    levels = mk_levels(sigma)
            elif scheme == 'FISTA':
                print('FISTA ', i)
                S = np.copy(Snew)
                alphanew = np.copy(alpha)
                alpha, csi, ts = tools.FISTA(Y, alphanew, F_op, I_op, mu, ts, csi, reg1, transform, inverse, mask = mask)
                Snew = inverse(alpha)
                FS = F_op(Snew)
            elif scheme == 'Vu':
                print('Vu ', i)
                S = np.copy(Snew)
                Snew,alpha = tools.Vu_Primal_dual(Y, S, alpha, mu, tau, F_op, I_op, transform, inverse, reg1, reg_plus)
                FS = F_op(Snew)
           # plt.imshow(S)
           # plt.show()
            # Track convergence: coefficient stability (SDR) and data
            # residual in noise units.
            SDR = tools.SDR(alpha, alphanew)
            Res = tools.Res(Y,FS,sigma0)
            #Convergence condition
            Res1.append(Res)
            Res2.append(SDR)
           # ks = ks-steps
            # Accumulate "converged" points from Criteria; stop after 10.
            if i>5:
                add = Criteria(i, Res1, Res2)
                if add == 0:
                    points = np.max([0,points-1])
                else:
                    points+=add
            if points >= 10:
                print('BREAK: algorithm converged at iteration: ', i)
                break
            i = i+1
            if i == niter:
                print('BREAK: Maximum number of iterations reached.')
       # alpha = transform(S)
        # Soft reweighting for the next pass: coefficients well below
        # kmax*levels get weight ~1, strong ones get weight ~0.
        weightS = 1./(1.+np.exp(-10.*(levels*kmax-alpha)))
   # plt.show()
    #Final reconstruction of the source
    if np.size(np.shape(sigma0))>2:
        sigma0[sigma0==0]=np.mean(sigma0)
    if verbosity == 1:
        plt.imshow((Y-FS)/(sigma0)); plt.colorbar(); plt.show()
        plt.plot(Res1, 'b'); plt.show()
        plt.plot(Res2, 'r');
        plt.show()
    if noise == 'poisson':
        plt.subplot(211)
        plt.title('S')
        plt.imshow(S); plt.colorbar()
        plt.show()
    return Snew, FS
#############################SLIT MCA for blended lenses############################
def SLIT_MCA(Y, Fkappa, kmax, niter, riter, size,PSF, PSFconj, lvlg = 0, lvls = 0, noise = 'gaussian', tau =0, levels = [0], WS = 1, WG = 1, mask = [0,0], Ginit=0, Kills = 0, Killg = 0):
    ##DESCRIPTION:
    ##    Joint estimation of the source light profile and of the (convolved) lens
    ##    galaxy light profile from an image of a strong lens system, by alternating
    ##    FISTA minimisations over the source S and the lens light G.
    ##
    ##INPUTS:
    ##  -Y: a 2-D image of a lensed source given as n1xn2 numpy array.
    ##  -Fkappa: an array giving the mapping between lens and source. This array is calculated from the lens mass density
    ##          using tools from SLIT.Lens
    ##  -kmax: the detection threshold in units of noise levels. We usualy set this value to 5 to get a 5 sigma
    ##          detection threshold.
    ##  -niter: maximal number of iterations in the main loop over G.
    ##  -riter: maximal number of iterations in the inner loop over S.
    ##  -size: resolution factor between lens and source grids such that the size of the output source
    ##          will be n1*size x n2*size
    ##  -PSF: the point spread function of the observation provided as a 2D array.
    ##  -PSFconj: the conjugate of the PSF. NOTE(review): it is overwritten with PSF.T
    ##          a few lines below, so the value passed in is currently ignored -- confirm.
    ##
    ##OPTIONS:
    ##  -levels: an array that contains the noise levels at each band of the wavelet decomposition of the source.
    ##          If not provided, the routine computes them and saves them in 'Noise_levels_MCA.fits'.
    ##  -mask: an array of zeros and ones with size n1xn2. The zeros stand for masked data.
    ##  -Ginit: educated guess for the lens galaxy light profile (currently unused).
    ##
    ##OUTPUTS:
    ##  -Snew: the source light profile.
    ##  -FS: the lensed version of the estimated source light profile.
    ##  -Gnew: the estimated lens light profile.
    ##  -FG: the convolved lens light profile in image plane.
    #Shape of the image
    n1,n2 = np.shape(Y)
    #Initialisation of the source
    ns1= n1*size
    ns2 = n2*size
    PSFconj = PSF.T
    #Number of starlet scales in source and image planes
    if lvlg ==0:
        lvlg = int(np.log2(n2))      # fix: np.int was removed from modern numpy
    else:
        lvlg = np.min([lvlg,int(np.log2(n2))])
    lvls = lvlg
    if lvls > int(np.log2(ns2)):
        print('Error, too many wavelet levels for the source. Choose a smaller value for lvl')
        exit   # NOTE(review): bare name is a no-op; execution continues past this error
    #Masking if required
    if np.sum(mask) == 0:
        mask = np.ones((n1,n2))
    Y = Y*mask
    #Noise standard deviation in image plane
    if noise == 'gaussian':
        print('noise statistic is gaussain')
        sigma0 = tools.MAD(Y)
    if noise == 'poisson':
        print('noise statistic is poisson')
        sigma0 = tools.MAD_poisson(Y,tau)
    if (noise == 'G+P') or (noise == 'P+G'):
        print('noise statistic is poisson and gaussain mixture')
        sigma0 = np.sqrt(tools.MAD_poisson(Y,tau, lvlg)**2+tools.MAD(Y)**2)
    #Mapping of an all-at-one image
    lensed = lens_one(Fkappa, n1,n2, size)
    # fix: `lensed/lensed == 1` raised 0/0 warnings; test the support directly
    supp = np.zeros((lvls,lensed.shape[0],lensed.shape[1]))
    supp[:,lensed != 0] = 1
    #Limits of the image plane in source plane
    bound = mk_bound(Fkappa, n1,n2, size)
    #Useful functions
    def Finv_apply(I):
        return Lens.image_to_source(I, size, Fkappa, lensed = lensed)
    def Lens_op2(I):
        return Lens.image_to_source(I, size, Fkappa, lensed = lensed, square = 1)
    def F_apply(Si):
        return Lens.source_to_image(Si, n1, n2,Fkappa)
    def PSF_apply(i):
        return scp.fftconvolve(i,PSF,mode = 'same')
    def PSFT_apply(ii):
        return scp.fftconvolve(ii,PSFconj,mode = 'same')
    def transform(x):
        return tools.wave_transform(x, lvlg)
    def inverse(x):
        return tools.iuwt(x)
    #Forward Source operator
    def FS_op(X):
        return PSF_apply(F_apply(X))
    #Inverse Source operator
    def IS_op(X):
        return Finv_apply(PSFT_apply(X))
    #Forward Lens operator
    def FG_op(X):
        return (PSF_apply(X))
    #Inverse Lens operator
    def IG_op(X):
        return (PSFT_apply(X))
    #Regularisation (Backward term)
    def regG0(X):
        return tools.Hard_Threshold(X, transform, inverse, levelg*kG)
    def regS0(X):
        return tools.Hard_Threshold(X, transform, inverse, levels*kS)
    def regG1(X):
        return tools.Soft(X, levelg*weightG, k, supp=supp, Kill = Killg)
    def regS1(X):
        return tools.Soft(X, levels*weightS, k, supp=supp, Kill = Kills)
    def reg_filter(X):
        # NOTE(review): never called; references I_op and lvl, which are undefined here
        return tools.mr_filter(X,levels,kmax,20,transform, inverse, I_op(sigma0*np.ones((n1,n2))), lvl = lvl, supp = supp)
    # Noise levels in image plane in starlet space
    levelg = tools.level(n1, n2, lvlg) * sigma0
    #Noise simulations to estimate noise levels in source plane
    if np.sum(levels)==0:
        print('Calculating noise levels')
        levels = level_source(n1, n2, sigma0, size, PSFconj, Lens_op2, lensed, lvls)
    #Saves levels
    hdus = pf.PrimaryHDU(levels)
    lists = pf.HDUList([hdus])
    lists.writeto('Noise_levels_MCA.fits', clobber=True)
    #Computation of spectral norms
    FS_norm = spectralNorm(ns1,ns2,20,1e-10,FS_op,IS_op)
    Star_norm_im = spectralNorm(n1,n2,20,1e-10,transform,inverse)
    Star_norm_s = spectralNorm(ns1,ns2,20,1e-10,transform,inverse)
    muG = 1./(2*Star_norm_im)**2
    muS = 1./(2*Star_norm_s*FS_norm)**2
    print(muS, muG)
    weightS = WS
    weightG = WG
    #Reweighting loop
    for it in range(3):
        #Initialisations
        i = 0
        K_s = np.zeros(niter)
        tg = 1
        ts = 1
        FS = 0
        FG = 0
        G = np.zeros((n1, n2))
        S = np.zeros((ns1, ns2))
        Gnew = np.copy(G)
        Snew = np.copy(S)
        alphaS = transform(S)
        csiS = np.copy(alphaS)
        alphaG = transform(G)
        csiG = np.copy(alphaG)
        # NOTE(review): fourth argument is `levels`; presumably `levelg` was intended -- confirm
        k = tools.MOM(transform(IS_op(Y)), transform(IG_op(Y)), levels, levels)
        step = (k-kmax)/(niter-5)
        print(k)
        Res1 = []        # fix: the residual histories were scalars, which broke
        Res2 = []        # the convergence test and the final plots
        points = 0       # fix: `points` was read before ever being assigned
        #Beginning of main loop
        while i < niter:
            print('main loop: ',i)
            kMOM = tools.MOM(alphaS, alphaG, levels, levelg)
            k = k-step
            if kMOM<k:
                k = kMOM
                print('MOMMYs Threshold: ', k)
            k = np.max([kmax, k])
            #Inner loop over the source S
            DS = Y-FG
            ts = 1
            pointS = 0
            Res1S = []
            Res2S = []
            for j in range(riter):
                S = np.copy(Snew)
                alphaS, csiS, ts = tools.FISTA(DS, alphaS, FS_op, IS_op, muS, ts, csiS, regS1, transform, inverse, pos = 1)
                Snew = inverse(alphaS)
                FS = FS_op(Snew)
                Res1S.append(tools.Res(S,Snew,sigma0))
                Res2S.append(tools.SDR(S,Snew))
                # fix: Criteria takes three arguments and needs two samples; accumulate its vote
                if j > 1:
                    pointS += Criteria(j, Res1S, Res2S)
            #Inner loop over the lens light G
            DG = Y-FS
            tg = 1
            pointG = 0
            Res1G = []
            Res2G = []
            for j2 in range(riter):
                G = np.copy(Gnew)
                alphaG, csiG, tg = tools.FISTA(DG, alphaG, FG_op, IG_op, muG, tg, csiG, regG1, transform, inverse, pos = 1)
                Gnew = inverse(alphaG)
                FG = FG_op(Gnew)
                # fix: this loop recorded the S diagnostics (copy-paste); track G instead
                Res1G.append(tools.Res(G,Gnew,sigma0))
                Res2G.append(tools.SDR(G,Gnew))
                if j2 > 1:
                    pointG += Criteria(j2, Res1G, Res2G)
            #Global diagnostics for this outer iteration
            Res1.append(tools.Res(Y, FS+FG, sigma0))   # fix: was `tootls.Res` (NameError)
            Res2.append((tools.SDR(Gnew, G)+tools.SDR(Snew, S))/2.)
            K_s[i] = Res1[-1]                          # fix: referenced undefined `newres`
            #Convergence condition
            if i>5:
                points += Criteria(i, Res1, Res2)
                if points >= 10:
                    print('BREAK: algorithm converged at iteration: ', i)
                    break
            plt.figure(0)
            plt.subplot(221)
            plt.title('S')
            plt.imshow(Snew)
            plt.subplot(222)
            plt.title('FS')
            plt.imshow(FS)
            plt.subplot(223)
            plt.title('FG')
            plt.imshow(FG)
            plt.subplot(224)
            plt.title('Residuals')
            plt.imshow(Y-FS-FG)
            plt.savefig('Res'+str(i)+'.png')
            i +=1
        #Weighting for the next reweighted-L1 pass
        weightS = 1./(1.+np.exp(-10.*(levels*kmax-alphaS)))
        weightG = 1./(1.+np.exp(-10.*(levelg*kmax-alphaG)))
    #Final reconstructions
    plt.show()
    plt.figure(1)
    plt.subplot(211)
    plt.plot(Res1)
    plt.subplot(212)
    plt.plot(Res2)
    plt.show()
    return Snew, FS,Gnew, FG
################################### TOOLS ###################################
def Criteria(i, Res1, Res2, points=0):
    """Convergence vote based on the flattening of two residual histories.

    Parameters:
        i: current iteration index (unused; kept for API compatibility).
        Res1, Res2: sequences (length >= 2) of residual / SDR values.
        points: running score to accumulate into. Defaults to 0 so existing
            3-argument callers still receive the bare 0/1 vote; the
            4-argument call sites in SLIT_MCA (previously a TypeError)
            get ``points + vote`` back, as they expect.

    Returns ``points + 1`` when both histories have flattened (their latest
    step is small relative to their first step), ``points`` otherwise.
    """
    flat2 = np.abs(Res2[-1] - Res2[-2]) < 0.01 * np.abs(Res2[0] - Res2[1])
    flat1 = np.abs(Res1[-1] - Res1[-2]) < 0.001 * np.abs(Res1[0] - Res1[1])
    if flat2 and flat1:
        return points + 1
    return points
def plot_cube(cube):
    ##DESCRIPTION:
    ##    Display each layer of a cube in its own subplot panel (2 columns).
    ##
    ##INPUTS:
    ##    -cube: array of shape (n,n1,n2) with n the number of layers and n1xn2 the number of pixels.
    ##
    ##OUTPUTS:
    ##    -None
    n,n1,n2 = np.shape(cube)
    # fix: the original used n/2 rows (one row short for odd n) and 0-based
    # subplot indices, but matplotlib subplot numbering starts at 1
    rows = int(np.ceil(n / 2.))
    cols = 2
    for k in range(n):
        plt.subplot(rows, cols, k + 1)
        plt.imshow(cube[k,:,:]); plt.colorbar()
    return None
def level_source(n1,n2,sigma,size,PSFT, Lens_op2, lensed, lvl):
    ##DESCRIPTION:
    ##    Propagate the image-plane noise standard deviation `sigma` through the
    ##    (transposed) PSF and the lensing operator, and return starlet-domain
    ##    noise levels in the source plane.
    ##
    ##NOTE(review): mutates the caller's `lensed` array in place (zeros set to 1).
    ns1,ns2 = n1*size, n2*size
    ones = np.ones((n1,n2))
    lensed[lensed == 0] = 1
    noise = ones*sigma
    # Quadrature propagation of the noise through the PSF
    Hnoise = np.sqrt(scp.fftconvolve(noise**2, PSFT**2, mode = 'same'))
    Hnoise[np.isnan(Hnoise)] = 0
    FHnoise_old = Lens_op2(Hnoise)
    FHnoise = np.copy(FHnoise_old)
    # Inflate unconstrained (unmapped) pixels so they are heavily penalised
    FHnoise[FHnoise_old==0] = np.mean(FHnoise_old)*10.
    dirac = np.zeros((ns1,ns2))
    dirac[ns1//2, ns2//2] = 1   # fix: ns1/2 is a float index under Python 3
    wave_dirac = tools.wave_transform(dirac, lvl)
    levels = np.zeros(wave_dirac.shape)
    for i in range(lvl):
        if np.size(noise.shape) > 2:
            lvlso = (scp.fftconvolve(FHnoise[i, :, :] ** 2, wave_dirac[i, :, :] ** 2,
                                     mode='same'))
        else:
            lvlso = scp.fftconvolve(FHnoise ** 2, wave_dirac[i,:,:] ** 2,
                                    mode='same')
        levels[i, :, :] = np.sqrt(np.abs(lvlso))
        levels[i,lvlso == 0] = 0
    return levels
def spectralNorm(n1,n2,Niter,tol,f,finv):
    ##DESCRIPTION:
    ##    Estimate the spectral norm of the operator pair (f, finv) by power
    ##    iteration on finv o f, starting from a normalised random matrix.
    ##
    ##INPUTS:
    ##  -n1,n2: shape of the input
    ##  -Niter: maximal number of power iterations
    ##  -tol: relative tolerance on the norm used as a stopping criterion
    ##  -f: operator
    ##  -finv: inverse (adjoint) operator
    ##
    ##OUTPUTS:
    ##  -the estimated spectral norm of the operator
    probe = np.random.randn(n1, n2)
    norm_est = LA.norm(probe)
    probe = np.array(probe / norm_est)
    err = abs(tol)
    iteration = 0
    while iteration < Niter and err >= tol:
        # One power-iteration step: apply the operator, then its adjoint
        mapped = finv(f(probe))
        new_norm = LA.norm(mapped)
        probe = mapped / new_norm
        # Relative change of the norm estimate drives the stopping rule
        err = abs(new_norm - norm_est) / new_norm
        norm_est = new_norm
        iteration += 1
    return norm_est
def lens_one(Fkappa, n1,n2,size):
    ##DESCRIPTION:
    ##    Map an all-ones image to the source plane; the nonzero pixels of the
    ##    result mark the part of the source plane that is reached by the lens mapping.
    ##
    ##INPUTS:
    ##  -Fkappa: the mapping between source and image planes
    ##  -n1,n2: the shape of the image.
    ##  -size: the factor that scales the shape of the source relative to the shape of the image
    ##
    ##OUTPUTS:
    ##  -the projection to source plane of an all-ones image.
    flat_image = np.ones((n1, n2))
    return Lens.image_to_source(flat_image, size, Fkappa, lensed=[0])
def mk_bound(Fkappa, n1,n2,size):
    ##DESCRIPTION:
    ##    Binary (0/1 float) support of the lens image in the source plane.
    ##
    ##INPUTS:
    ##  -Fkappa: the mapping between source and image planes
    ##  -n1,n2: the shape of the image.
    ##  -size: the factor that scales the shape of the source relative to the shape of the image
    ##
    ##OUTPUTS:
    ##  -bound: the 0/1 support map of the projected image in the source plane.
    dirac = np.ones((n1,n2))
    lensed = Lens.image_to_source_bound(dirac, size,Fkappa,lensed = [0])
    # fix: the original computed lensed/lensed, which emits 0/0 warnings and
    # produces NaNs before masking; build the binary map directly instead
    bound = np.zeros(np.shape(lensed))
    bound[lensed != 0] = 1
    return bound
def mk_simu(n1,n2,lvl,size, sigma, I_op, transform, n):
    """Draw `n` Gaussian noise realisations of std `sigma`, push each through
    `I_op` (zero pixels replaced by 1) and `transform`, and stack the results
    into an array of shape (lvl, n1*size, n2*size, n)."""
    realisations = np.zeros((lvl, n1 * size, n2 * size, n))
    for idx in range(n):
        noise_map = np.random.randn(n1, n2) * sigma
        propagated = I_op(noise_map)
        propagated[propagated == 0] = 1
        realisations[:, :, :, idx] = transform(propagated)
    return realisations
def simulate_noise(n1,n2, sigma, size, I_op, transform, lvl, Npar=None):
    ##DESCRIPTION:
    ##    Simulate noise levels in source plane from the lensing and convolution operators:
    ##    draws 500 noise maps, propagates them with I_op/transform, and returns the
    ##    per-pixel standard deviation across realisations.
    ##
    ##INPUTS:
    ##  -n1,n2: the shape of the images for which to simulate noise maps.
    ##  -sigma: image-plane noise standard deviation.
    ##  -size: scaling factor for the shape of the source.
    ##  -I_op, transform: propagation and decomposition operators.
    ##  -lvl: number of decomposition scales.
    ##
    ##OPTIONS:
    ##  -Npar: number of worker processes, default half the CPU count.
    ##      (fix: the old default `np.int(mtp.cpu_count()/2)` crashed at import on
    ##      modern numpy; the Pool the original created was never used and leaked
    ##      worker processes, so it has been removed. Npar is kept for API
    ##      compatibility.)
    ##
    ##OUTPUTS:
    ##  -w_levels: per-pixel noise standard deviation, shape (lvl, n1*size, n2*size).
    n = 500
    if Npar is None:
        Npar = mtp.cpu_count() // 2
    if Npar > mtp.cpu_count():
        Npar = mtp.cpu_count()
    storage = mk_simu(n1,n2,lvl,size, sigma, I_op, transform,n)
    w_levels = np.std(storage, axis = 3)
    return w_levels
| 24,278 | 33.004202 | 186 | py |
SLIT | SLIT-master/SLIT/wave_transform.py | import numpy as np
import scipy.signal as cp
import matplotlib.pyplot as plt
import scipy.ndimage.filters as sc
def symmetrise(img, size):
    """Fill the padding border of `img` with mirror reflections of the central
    region of shape `size` = (n1, n2). Mutates `img` in place and returns it.

    fix: uses floor division -- under Python 3 the original `/` produced float
    slice bounds and raised TypeError (identical arithmetic under Python 2).
    """
    n3, n4 = np.shape(img)
    n1, n2 = size
    top = (n3 - n1) // 2
    left = (n4 - n2) // 2
    img[:top, :] = np.flipud(img[top:(n3 - n1), :])
    img[:, :left] = np.fliplr(img[:, left:(n4 - n2)])
    img[(n3 + n1) // 2:, :] = np.flipud(img[n1:(n3 + n1) // 2, :])
    img[:, (n4 + n2) // 2:] = np.fliplr(img[:, n2:(n4 + n2) // 2])
    return img
def fft_convolve(X,Y, inv = 0):
    """Circular convolution of X with Y via real FFTs (correlation when inv=1),
    rolled so the output is centred like a 'same'-mode convolution.

    fix: the roll shifts use floor division -- the original `-n1/2+1` was a
    float under Python 3 and raised TypeError ((-n1)//2 matches the Python 2
    arithmetic for odd sizes as well).
    """
    XF = np.fft.rfft2(X)
    YF = np.fft.rfft2(Y)
    if inv == 1:
        # Conjugation in Fourier space turns the convolution into a correlation
        YF = np.conj(YF)
    S = np.fft.irfft2(XF*YF)
    n1,n2 = np.shape(S)
    S = np.roll(S, -n1//2 + 1, axis=0)
    S = np.roll(S, -n2//2 + 1, axis=1)
    return np.real(S)
def wave_transform(img, lvl, Filter = 'Bspline', newwave = 1, convol2d = 0):
    """Starlet ("a trous" isotropic undecimated wavelet) transform of `img`.

    Returns an array of shape (lvl, n1, n2): lvl-1 detail planes followed by
    the coarse plane. With newwave=0 the planes telescope (their sum is `img`);
    with newwave=1 (default) the second-generation starlet details are stored.

    fix: loop/array indices are now integers -- the original iterated over
    np.linspace floats, which fail as array indices on modern numpy -- and
    n1/n2 are read from sh[0]/sh[1] (the original read sh[1] twice, which is
    wrong for non-square images).
    """
    mode = 'nearest'
    lvl = lvl-1   # number of detail scales; the coarse plane is appended last
    sh = np.shape(img)
    if np.size(sh) ==3:
        # Cube input: transform each slice independently along the smallest axis.
        mn = np.min(sh)
        wave = np.zeros([lvl+1,sh[1], sh[1],mn])  # assumes square slices -- TODO confirm
        for h in range(mn):
            if mn == sh[0]:
                wave[:,:,:,h] = wave_transform(img[h,:,:],lvl+1, Filter = Filter)
            else:
                wave[:,:,:,h] = wave_transform(img[:,:,h],lvl+1, Filter = Filter)
        return wave
    n1, n2 = sh[0], sh[1]
    if Filter == 'Bspline':
        h = [1./16, 1./4, 3./8, 1./4, 1./16]
    else:
        h = [1./4,1./2,1./4]
    n = np.size(h)
    h = np.array(h)
    # Cap the number of scales so the dilated filter still fits in the image
    if n+2**(lvl-1)*(n-1) >= np.min([n1,n2])/2.:
        lvl = int(np.log2((n1-1)/(n-1.))+1)
    c = img
    ## wavelet set of coefficients.
    wave = np.zeros([lvl+1,n1,n2])
    for i in range(lvl):
        # "a trous" dilation: insert 2**i - 1 zeros between the filter taps
        newh = np.zeros((1,n+(n-1)*(2**i-1)))
        newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h
        H = np.dot(newh.T,newh)
        ######Calculates c(j+1)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(c, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(c,newh[0,:],axis = 0, mode =mode)
        ###### Column convolution
        # NOTE(review): applied after both branches, so the convol2d path gets an
        # extra 1-D pass -- preserved from the original, confirm before changing
        cnew = sc.convolve1d(cnew,newh[0,:],axis = 1, mode =mode)
        if newwave ==1:
            ###### hoh for g; Column convolution
            if convol2d == 1:
                hc = cp.convolve2d(cnew, H, mode='same', boundary='symm')
            else:
                hc = sc.convolve1d(cnew,newh[0,:],axis = 0, mode = mode)
            ###### hoh for g; Line convolution
            hc = sc.convolve1d(hc,newh[0,:],axis = 1, mode = mode)
            ###### wj+1 = cj-hcj+1
            wave[i,:,:] = c-hc
        else:
            ###### wj+1 = cj-cj+1
            wave[i,:,:] = c-cnew
        c = cnew
    wave[i+1,:,:] = c
    return wave
def iuwt(wave, convol2d =0):
    """Inverse starlet transform: rebuild the image from a (lvl, n1, n2)
    coefficient cube by smoothing the coarse plane and adding the detail
    planes back from coarsest to finest.

    fix: the scale loop now uses an integer range -- the original iterated
    over np.linspace floats, which fail as array indices on modern numpy.
    """
    mode = 'nearest'
    lvl,n1,n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)
    cJ = np.copy(wave[lvl-1,:,:])   # start from the coarse plane
    for i in range(1, lvl):
        # "a trous" dilation matching the forward transform at this scale
        newh = np.zeros((1,n+(n-1)*(2**(lvl-1-i)-1)))
        newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h
        H = np.dot(newh.T,newh)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ,newh[0,:],axis = 0, mode = mode)
        ###### Column convolution
        cnew = sc.convolve1d(cnew,newh[0,:],axis = 1, mode = mode)
        cJ = cnew+wave[lvl-1-i,:,:]
    return np.reshape(cJ,(n1,n2))
| 3,715 | 25.169014 | 81 | py |
SLIT | SLIT-master/SLIT/tools.py | import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
from scipy import signal as scp
import gaussian as gs
import scipy.ndimage.filters as sc
import scipy.ndimage.filters as med
import scipy.signal as cp
def MOM(A, B, levelA, levelB):
    """Minimum-of-maxima threshold: compare the peak signal-to-noise of two
    coefficient cubes (coarse plane excluded) and return the smaller peak plus
    a tenth of their gap."""
    detailA = A[:-1, :, :]
    detailB = B[:-1, :, :]
    noiseA = levelA[:-1, :, :]
    noiseB = levelB[:-1, :, :]
    peakA = np.max(detailA[noiseA != 0] / noiseA[noiseA != 0])
    peakB = np.max(detailB[noiseB != 0] / noiseB[noiseB != 0])
    return np.min([peakA, peakB]) + 0.1 * np.abs(peakA - peakB)
def MAD(x,n=3):
    ##DESCRIPTION:
    ##  Estimates the noise standard deviation from the Median Absolute Deviation
    ##  of the finest starlet scale of x.
    ##
    ##INPUTS:
    ##  -x: a 2D image for which we look for the noise levels.
    ##
    ##OPTIONS:
    ##  -n: size of the median filter. Default is 3.
    ##
    ##OUTPUTS:
    ##  -sigma: the estimated noise standard deviation.
    # fix: np.int was removed from modern numpy; use the builtin int
    x = wave_transform(x, int(np.log2(x.shape[0])))[0,:,:]
    meda = med.median_filter(x,size = (n,n))
    medfil = np.abs(x-meda)
    # 1.48 rescales the MAD to a Gaussian standard deviation
    sigma = 1.48*np.median((medfil))
    return sigma
def MAD_box(x, tau):
    """Per-pixel MAD noise estimate on the finest starlet scale, computed in a
    sliding 6x6 window. `tau` is currently unused (kept for API compatibility).

    fix: window half-size uses floor division -- the original `winsize/2` was a
    float under Python 3 and broke np.pad and the slicing.
    """
    n1,n2 = x.shape
    xw = wave_transform(x,2)[0,:,:]
    winsize = 6
    halfwin = winsize // 2
    xw_pad = np.pad(xw, ((halfwin, halfwin), (halfwin, halfwin)), mode = 'symmetric')
    sigma = np.zeros((xw.shape))
    for i in range(n1):
        for j in range(n2):
            area = xw_pad[i+winsize-halfwin:i+winsize+halfwin, j+winsize-halfwin:j+winsize+halfwin]
            # 1.48 rescales the MAD to a Gaussian standard deviation
            sigma[i,j] = 1.48*np.median(np.abs(area-np.median(area)))
    return sigma
def MAD_poisson(x,tau,lvl):
    ##DESCRIPTION:
    ##  Estimates a per-pixel Poisson noise standard deviation map: the image is
    ##  first denoised with mr_filter at 8 sigma, then sigma = sqrt(|denoised|/tau).
    ##
    ##INPUTS:
    ##  -x: a 2D image for which we look for the noise levels.
    ##  -tau: exposure scaling of the Poisson statistic.
    ##  -lvl: number of starlet scales to use.
    ##
    ##OUTPUTS:
    ##  -sigma: per-pixel noise standard deviation map.
    n1, n2 = np.shape(x)
    def transform(im):
        return wave_transform(im, lvl)
    gaussian_sigma = MAD(x)
    levels = level(n1, n2, lvl) * gaussian_sigma
    denoised = np.copy(x)
    denoised, _mask = mr_filter(denoised, levels, 8, 20, transform, iuwt,
                                gaussian_sigma, lvl=lvl)
    return np.sqrt(np.abs(denoised) / tau)
def level_poisson(n1,n2, lvl,transform,sigma):
    """Propagate a per-pixel noise map `sigma` through `transform` (via its
    impulse response) and return the noise standard deviation at each of the
    `lvl` scales, shape (lvl, n1, n2).

    fix: the dirac index uses floor division -- `n1/2` is a float under
    Python 3 and raised TypeError.
    """
    dirac = np.zeros((n1,n2))
    dirac[n1//2, n2//2] = 1
    wave_dirac = transform(dirac)
    levels = np.zeros(wave_dirac.shape)
    for i in range(lvl):
        if np.size(sigma.shape) > 2:
            lvlso = (scp.fftconvolve(sigma[i, :, :] ** 2, wave_dirac[i, :, :] ** 2,
                                     mode='same'))
        else:
            lvlso = scp.fftconvolve(sigma ** 2, wave_dirac[i,:,:] ** 2,
                                    mode='same')
        levels[i, :, :] = np.sqrt(np.abs(lvlso))
    return levels
def Forward_Backward(Y, X, F_op, I_op, transform, inverse, mu, reg, pos = 1, subiter = 0):
    """One forward-backward (proximal gradient) step: gradient descent on the
    data fidelity with step `mu`, then the proximal operator `reg` applied in
    the transformed domain. `pos` and `subiter` are unused (API compatibility)."""
    gradient_step = mu * I_op(Y - F_op(X))
    candidate = np.copy(X + gradient_step)
    return inverse(reg(transform(candidate)))
def Vu_Primal_dual(Y, X, Z, mu, tau, F_op, I_op, transform, inverse, reg1, reg2):
    """One iteration of the Vu primal-dual splitting scheme: primal step of
    size `tau` with proximal `reg2`, then dual step of size `mu` where `reg1`
    is subtracted (Moreau decomposition). Returns the updated (X, Z)."""
    primal = reg2(X + tau * (I_op(Y - F_op(X)) - inverse(Z)))
    dual_candidate = Z + mu * transform(2 * primal - X)
    dual = dual_candidate - reg1(dual_candidate)
    return primal, dual
def SDR(X, Y):
    """Signal-to-distortion ratio in dB, evaluated on the support where X != 0."""
    support = X != 0
    signal = np.sqrt(np.sum(X[support] ** 2))
    distortion = np.sqrt(np.sum((Y[support] - X[support]) ** 2))
    return 10 * np.log10(signal / distortion)
def Res(X,Y,sigma):
    """Root-mean-square of the sigma-normalised residual between X and Y."""
    normalised = (X - Y) / sigma
    return np.sqrt(np.sum(normalised ** 2) / X.size)
def FISTA(Y, alphaX, F_op, I_op, mu, ts, csi, reg, transform, inverse, pos = 1, mask = 1):
    """One FISTA iteration in coefficient space: gradient step of size `mu` on
    the (optionally masked) residual, proximal operator `reg`, then Nesterov
    momentum. Returns (alpha, csi, ts) updated. `pos` is unused."""
    current = inverse(alphaX)
    gradient = mu * I_op(Y - F_op(current) * mask)
    alpha = reg(transform(gradient) + csi)
    # Nesterov step-size sequence
    ts_next = (1. + np.sqrt(1. + 4. * ts ** 2)) / 2.
    momentum = alpha + ((ts - 1) / ts_next) * (alpha - alphaX)
    return alpha, momentum, ts_next
def Soft(X, level, k, supp =1, Kill = 0):
Xnew = np.sign(X)*(np.abs(X)-level*(k))
Xnew[np.where((np.abs(X)-level*(k))<0)] = 0
Xnew[0,:,:] = np.sign(X[0,:,:]) * (np.abs(X[0,:,:]) - level[0,:,:] * (k+1))
Xnew[0,np.where((np.abs(X[0,:,:]) - level[0,:,:] * (k+1)) < 0)] = 0
if Kill == 1:
Xnew[-1,:,:] = 0
else:
Xnew[-1, :, :] = X[-1,:,:]
#print(Xnew.shape, supp.shape)
# Xnew = Xnew*supp
return Xnew
def level(n1, n2, lvl):
    ##DESCRIPTION:
    ##    Estimates the noise levels in starlet space in image plane, as the L2
    ##    norm of each scale of the transform of a centred dirac.
    ##
    ##INPUTS:
    ##    -n1,n2: shape of the image for which to get noise levels
    ##    -lvl: number of starlet scales
    ##
    ##OUTPUTS:
    ##    -levels: units of noise levels at each scale and location of a starlet transform
    dirac = np.zeros((n1, n2))
    # fix: n1/2 is a float index under Python 3
    dirac[n1 // 2, n2 // 2] = 1
    wave_dirac = wave_transform(dirac, lvl, newwave=0)
    wave_sum = np.sqrt(np.sum(np.sum(wave_dirac ** 2, 1), 1))
    # Broadcast the per-scale norm over the image plane
    levels = np.multiply(np.ones((lvl, n1, n2)).T, wave_sum).T
    return levels
def Soft_Threshold(X, transform, inverse, level, k, supp =1, Kill = 0):
    """Soft-threshold X in the transformed domain at level*k and return the
    reconstruction. The support mask M is built from a non-normalised starlet
    transform of X (finest scale always masked out); the coarse scale is
    zeroed when Kill=1 and passed through otherwise.

    fix: the original overwrote X with its transform and then read the
    undefined name `Xw` (NameError); the structure now mirrors Hard_Threshold.
    """
    Xw = transform(X)
    alpha = wave_transform(X, Xw.shape[0], newwave = 0)
    M = np.zeros(alpha.shape)
    M[np.abs(alpha) - level * k > 0] = 1
    M[0, :, :] = 0
    Xnew = np.sign(Xw) * (np.abs(Xw) - level * k)
    Xnew = Xnew * M
    if Kill == 1:
        Xnew[-1, :, :] = 0
    else:
        Xnew[-1, :, :] = Xw[-1, :, :]
    Xnew = Xnew * supp
    return inverse(Xnew)
def Hard(X, level, k, supp=1):
    """Hard-threshold a coefficient cube: zero entries with |X| below level*k,
    always keep the coarse (last) scale, then apply the support mask `supp`."""
    Xnew = np.copy(X)
    below = (np.abs(X) - level * k) < 0
    Xnew[below] = 0
    Xnew[-1, :, :] = X[-1, :, :]
    return Xnew * supp
def Hard_Threshold(X, transform, inverse, level, k, supp=1, M = [0]):
    """Hard-threshold X in the transformed domain and return (reconstruction, mask).

    When no mask M is provided (sum is 0), it is built from a non-normalised
    starlet transform of X: 1 where |alpha| exceeds level*k, except the finest
    scale which uses the harder threshold level*(k+1). The coarse scale is
    always kept, and `supp` is applied before reconstruction.
    """
    Xw = transform(X)
    if np.sum(M) == 0:
        alpha = wave_transform(X, Xw.shape[0], newwave=0)
        M = np.zeros(alpha.shape)
        M[(np.abs(alpha) - level * k) > 0] = 1
        # Finest scale: re-derive with threshold k + 1
        M[0, :, :] = 0
        M[0, np.abs(alpha[0, :, :]) - level[0, :, :] * (k + 1) > 0] = 1
    Xnew = Xw * M
    Xnew[-1, :, :] = Xw[-1, :, :]
    Xnew = Xnew * supp
    return inverse(Xnew), M
def mr_filter(Y, level, k, niter, transform, inverse, sigma, lvl = 6, Soft = 0, pos = 1, supp = 1):
    """Iterative multi-resolution filtering of Y.

    A fixed significance mask M (coefficients above level*k, finest scale
    excluded, coarse scale always kept) is applied to the transformed residual
    at every iteration; the result accumulates into Xnew. Stops early when the
    sigma-normalised residual drops below 1.1. `Soft` is unused.
    Returns (Xnew, M).
    """
    Xnew = 0
    alpha = wave_transform(Y, lvl, newwave=0)
    M = np.zeros(alpha.shape)
    M[np.abs(alpha) - level * k > 0] = 1
    M[0, :, :] = 0
    M[-1, :, :] = 1
    for step in range(niter):
        R = Y - Xnew
        if np.std(R / sigma) < 1.1:
            print('limit: ', step)
            break
        Rnew = inverse(transform(R) * M * supp)
        Xnew = Xnew + Rnew
        if pos == True:
            Xnew[Xnew < 0] = 0
    return (Xnew), M
def wave_transform(img, lvl, Filter = 'Bspline', newwave = 1, convol2d = 0):
    """Starlet ("a trous" isotropic undecimated wavelet) transform of `img`.

    Returns an array of shape (lvl, n1, n2): lvl-1 detail planes followed by
    the coarse plane. With newwave=0 the planes telescope (their sum is `img`);
    with newwave=1 (default) the second-generation starlet details are stored.

    fix: loop/array indices are now integers (the original iterated over
    np.linspace floats, which fail as array indices on modern numpy), np.int
    is replaced by the builtin int, and n1/n2 are read from sh[0]/sh[1]
    (the original read sh[1] twice, which is wrong for non-square images).
    """
    mode = 'nearest'
    lvl = lvl-1   # number of detail scales; the coarse plane is appended last
    sh = np.shape(img)
    if np.size(sh) ==3:
        # Cube input: transform each slice independently along the smallest axis.
        mn = np.min(sh)
        wave = np.zeros([lvl+1,sh[1], sh[1],mn])  # assumes square slices -- TODO confirm
        for h in range(mn):
            if mn == sh[0]:
                wave[:,:,:,h] = wave_transform(img[h,:,:],lvl+1, Filter = Filter)
            else:
                wave[:,:,:,h] = wave_transform(img[:,:,h],lvl+1, Filter = Filter)
        return wave
    n1, n2 = sh[0], sh[1]
    if Filter == 'Bspline':
        h = [1./16, 1./4, 3./8, 1./4, 1./16]
    else:
        h = [1./4,1./2,1./4]
    n = np.size(h)
    h = np.array(h)
    # Cap the number of scales to what the image size supports
    lvl = np.min([lvl, int(np.log2(n2))])
    c = img
    ## wavelet set of coefficients.
    wave = np.zeros([lvl+1,n1,n2])
    for i in range(lvl):
        # "a trous" dilation: insert 2**i - 1 zeros between the filter taps
        newh = np.zeros((1,n+(n-1)*(2**i-1)))
        newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h
        H = np.dot(newh.T,newh)
        ######Calculates c(j+1)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(c, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(c,newh[0,:],axis = 0, mode =mode)
        ###### Column convolution
        # NOTE(review): applied after both branches, so the convol2d path gets an
        # extra 1-D pass -- preserved from the original, confirm before changing
        cnew = sc.convolve1d(cnew,newh[0,:],axis = 1, mode =mode)
        if newwave ==1:
            ###### hoh for g; Column convolution
            if convol2d == 1:
                hc = cp.convolve2d(cnew, H, mode='same', boundary='symm')
            else:
                hc = sc.convolve1d(cnew,newh[0,:],axis = 0, mode = mode)
            ###### hoh for g; Line convolution
            hc = sc.convolve1d(hc,newh[0,:],axis = 1, mode = mode)
            ###### wj+1 = cj-hcj+1
            wave[i,:,:] = c-hc
        else:
            ###### wj+1 = cj-cj+1
            wave[i,:,:] = c-cnew
        c = cnew
    wave[i+1,:,:] = c
    return wave
def iuwt(wave, convol2d =0):
    """Inverse starlet transform: rebuild the image from a (lvl, n1, n2)
    coefficient cube by smoothing the coarse plane and adding the detail
    planes back from coarsest to finest.

    fix: the scale loop now uses an integer range -- the original iterated
    over np.linspace floats, which fail as array indices on modern numpy.
    """
    mode = 'nearest'
    lvl,n1,n2 = np.shape(wave)
    h = np.array([1./16, 1./4, 3./8, 1./4, 1./16])
    n = np.size(h)
    cJ = np.copy(wave[lvl-1,:,:])   # start from the coarse plane
    for i in range(1, lvl):
        # "a trous" dilation matching the forward transform at this scale
        newh = np.zeros((1,n+(n-1)*(2**(lvl-1-i)-1)))
        newh[0,np.int_(np.linspace(0,np.size(newh)-1,len(h)))] = h
        H = np.dot(newh.T,newh)
        ###### Line convolution
        if convol2d == 1:
            cnew = cp.convolve2d(cJ, H, mode='same', boundary='symm')
        else:
            cnew = sc.convolve1d(cJ,newh[0,:],axis = 0, mode = mode)
        ###### Column convolution
        cnew = sc.convolve1d(cnew,newh[0,:],axis = 1, mode = mode)
        cJ = cnew+wave[lvl-1-i,:,:]
    return np.reshape(cJ,(n1,n2))
def plot_cube(cube):
    ##DESCRIPTION:
    ##    Display each layer of a cube in its own subplot panel (2 columns).
    ##
    ##INPUTS:
    ##    -cube: array of shape (n,n1,n2) with n the number of layers and n1xn2 the number of pixels.
    ##
    ##OUTPUTS:
    ##    -None
    n,n1,n2 = np.shape(cube)
    # fix: the original used n/2 rows (one row short for odd n) and 0-based
    # subplot indices, but matplotlib subplot numbering starts at 1
    rows = int(np.ceil(n / 2.))
    cols = 2
    for k in range(n):
        plt.subplot(rows, cols, k + 1)
        plt.imshow(cube[k,:,:]); plt.colorbar()
    return None
| 10,239 | 27.444444 | 131 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.