repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
NeuroKit | NeuroKit-master/neurokit2/hrv/intervals_utils.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy
def _intervals_successive(intervals, intervals_time=None, thresh_unequal=10, n_diff=1):
"""Identify successive intervals.
Identification of intervals that are consecutive
(e.g. in case of missing data).
Parameters
----------
intervals : list or ndarray
Intervals, e.g. breath-to-breath (BBI) or rpeak-to-rpeak (RRI)
intervals_time : list or ndarray, optional
Time points corresponding to intervals, in seconds. Defaults to None,
in which case the cumulative sum of the intervals is used.
thresh_unequal : int or float, optional
Threshold at which the difference between time points is considered to
be unequal to the interval, in milliseconds. Defaults to 10.
n_diff: int, optional
The number of times values are differenced.
Can be used to check which values are valid for the n-th difference
assuming successive intervals. Defaults to 1.
Returns
----------
array
An array of True/False with True being the successive intervals.
Examples
----------
.. ipython:: python
import neurokit2 as nk
rri = [400, 500, 700, 800, 900]
rri_time = [0.7, 1.2, 2.5, 3.3, 4.2]
successive_intervals = nk.intervals_successive(rri, rri_time)
successive_intervals
rri = [400, 500, np.nan, 700, 800, 900]
successive_intervals = nk.intervals_successive(rri)
successive_intervals
"""
# Convert to numpy array
intervals = np.array(intervals)
if intervals_time is None:
intervals_time = np.nancumsum(intervals / 1000)
else:
intervals_time = np.array(intervals_time).astype(float)
intervals_time[np.isnan(intervals)] = np.nan
diff_intervals_time_ms = np.diff(intervals_time, n=n_diff) * 1000
abs_error_intervals_ref_time = abs(
diff_intervals_time_ms - np.diff(intervals[1:], n=n_diff - 1)
)
successive_intervals = abs_error_intervals_ref_time <= thresh_unequal
return np.array(successive_intervals)
def _intervals_time_uniform(intervals_time, decimals=3):
"""Check whether timestamps are uniformly spaced.
Useful for determining whether intervals have been interpolated.
Parameters
----------
intervals_time : list or array, optional
List or numpy array of timestamps corresponding to intervals, in seconds.
decimals : int, optional
The precision of the timestamps. The default is 3.
Returns
----------
bool
Whether the timestamps are uniformly spaced
"""
return len(np.unique(np.round(np.diff(intervals_time), decimals=decimals))) == 1
def _intervals_sanitize(intervals, intervals_time=None, remove_missing=True):
    """**Interval input sanitization**

    Converts input to numpy arrays, reconstructs timestamps when absent,
    auto-detects timestamps mistakenly given in milliseconds, and
    optionally removes missing values.

    Parameters
    ----------
    intervals : list or array
        List or numpy array of intervals, in milliseconds.
    intervals_time : list or array, optional
        List or numpy array of timestamps corresponding to intervals, in seconds.
    remove_missing : bool, optional
        Whether to remove NaNs and infinite values from intervals and timestamps.
        The default is True.

    Returns
    -------
    intervals : array
        Sanitized intervals, in milliseconds.
    intervals_time : array
        Sanitized timestamps corresponding to intervals, in seconds.
    intervals_missing : bool
        Whether there were missing intervals detected.

    Examples
    ---------
    .. ipython:: python
      import neurokit2 as nk
      ibi = [500, 400, 700, 500, 300, 800, 500]
      ibi, ibi_time, intervals_missing = intervals_sanitize(ibi)
    """
    if intervals is None:
        # Fix: return three values (not two) so the arity matches the
        # documented signature and the 3-way unpacking done by callers.
        return None, None, None

    # Ensure that input is numpy array
    intervals = np.array(intervals)
    if intervals_time is None:
        # Impute intervals with median in case of missing values to calculate timestamps
        imputed_intervals = np.where(
            np.isnan(intervals), np.nanmedian(intervals, axis=0), intervals
        )
        # Compute the timestamps of the intervals in seconds
        intervals_time = np.nancumsum(imputed_intervals / 1000)
    else:
        # Ensure that input is numpy array
        intervals_time = np.array(intervals_time)
        # Confirm that timestamps are in seconds
        successive_intervals = _intervals_successive(intervals, intervals_time=intervals_time)
        # Fix: the original `np.all(...) is False` was always False because
        # np.all returns a numpy bool, never the built-in False singleton,
        # so the milliseconds-to-seconds detection below never ran.
        if not np.all(successive_intervals):
            # Check whether intervals appear to be interpolated
            if not _intervals_time_uniform(intervals_time):
                # If none of the differences between timestamps match
                # the length of the R-R intervals in seconds,
                # try converting milliseconds to seconds
                converted_successive_intervals = _intervals_successive(
                    intervals, intervals_time=intervals_time / 1000
                )
                # Check if converting to seconds increased the number of differences
                # between timestamps that match the length of the R-R intervals in seconds
                if np.sum(converted_successive_intervals) > np.sum(successive_intervals):
                    # Assume timestamps were passed in milliseconds and convert to seconds
                    intervals_time = intervals_time / 1000

    intervals_missing = _intervals_missing(intervals, intervals_time)

    if remove_missing:
        # Remove NaN R-R intervals, if any
        intervals_time = intervals_time[np.isfinite(intervals)]
        intervals = intervals[np.isfinite(intervals)]
    return intervals, intervals_time, intervals_missing
def _intervals_missing(intervals, intervals_time=None):
    """Return True if the intervals contain missing values or gaps.

    A gap is detected when timestamps are provided, some (but not all)
    intervals are non-successive, and the timestamps are not uniformly
    spaced (uniform spacing suggests interpolated data rather than gaps).
    """
    # Non-finite values (NaN/inf) in the intervals themselves count as missing.
    if len(intervals[np.isfinite(intervals)]) < len(intervals):
        return True
    elif intervals_time is not None:
        successive_intervals = _intervals_successive(intervals, intervals_time=intervals_time)
        # Mixed successive/non-successive pattern hints at real gaps...
        if not np.all(successive_intervals) and np.any(successive_intervals):
            # Check whether intervals appear to be interpolated
            if not _intervals_time_uniform(intervals_time):
                return True
    return False
def _intervals_time_to_sampling_rate(intervals_time, central_measure="mean"):
"""Get sampling rate from timestamps.
Useful for determining sampling rate used to interpolate intervals.
Parameters
----------
intervals_time : list or array, optional
List or numpy array of timestamps corresponding to intervals, in seconds.
central_measure : str, optional
The measure of central tendancy used. Either ``"mean"`` (default), ``"median"``, or ``"mode"``.
Returns
----------
bool
Whether the timestamps are uniformly spaced
"""
if central_measure == "mean":
sampling_rate = float(1 / np.nanmean(np.diff(intervals_time)))
elif central_measure == "median":
sampling_rate = float(1 / np.nanmedian(np.diff(intervals_time)))
else:
sampling_rate = float(1 / scipy.stats.mode(np.diff(intervals_time)))
return sampling_rate
| 7,239 | 34.145631 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_badchannels.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from ..signal import signal_zerocrossings
from ..stats import hdi, mad, standardize
def eeg_badchannels(eeg, bad_threshold=0.5, distance_threshold=0.99, show=False):
    """**Find bad channels**
    Find bad channels among the EEG channels.
    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    bad_threshold : float
        The proportion of indices (for instance, the mean, the SD, the skewness, the kurtosis, etc.)
        on which an observation is considered an outlier to be considered as bad. The default, 0.5,
        means that a channel must score as an outlier on half or more of the indices.
    distance_threshold : float
        The quantile that defines the absolute distance from the mean, i.e., the z-score for a
        value of a variable to be considered an outlier. For instance, .975 becomes
        ``scipy.stats.norm.ppf(.975) ~= 1.96``. The default value (.99) means that all observations
        beyond 2.33 SD from the mean will be classified as outliers.
    show : bool
        Visualize individual EEG channels with highlighted bad channels. Defaults to False
    Returns
    -------
    list
        List of bad channel names
    DataFrame
        Information of each channel, such as standard deviation (SD), mean, median absolute
        deviation (MAD), skewness, kurtosis, amplitude, highest density intervals, number of zero
        crossings.
    Examples
    ---------
    .. ipython:: python
      import neurokit2 as nk
      eeg = nk.mne_data("filt-0-40_raw")
      bads, info = nk.eeg_badchannels(eeg, distance_threshold=0.95, show=False)
    """
    # MNE object: keep only the EEG channels and extract the data array.
    if isinstance(eeg, (pd.DataFrame, np.ndarray)) is False:
        try:
            import mne
        except ImportError as e:
            raise ImportError(
                "NeuroKit error: eeg_badchannels(): the 'mne' module is required for this function"
                " to run. Please install it first (`pip install mne`).",
            ) from e
        selection = mne.pick_types(eeg.info, eeg=True)
        ch_names = np.array(eeg.ch_names)[selection]
        eeg, _ = eeg[selection]
    else:
        # Plain arrays carry no channel names; fall back to row indices.
        ch_names = np.arange(len(eeg))
    # Describe each channel with a set of distributional statistics.
    results = []
    for i in range(len(eeg)):
        channel = eeg[i, :]
        hdi_values = hdi(channel, ci=0.90)  # 90% highest density interval
        info = {
            "Channel": [i],
            "SD": [np.nanstd(channel, ddof=1)],
            "Mean": [np.nanmean(channel)],
            "MAD": [mad(channel)],
            "Median": [np.nanmedian(channel)],
            "Skewness": [scipy.stats.skew(channel)],
            "Kurtosis": [scipy.stats.kurtosis(channel)],
            "Amplitude": [np.max(channel) - np.min(channel)],
            "CI_low": [hdi_values[0]],
            "CI_high": [hdi_values[1]],
            # Zero crossings counted after removing the channel's mean offset.
            "n_ZeroCrossings": [len(signal_zerocrossings(channel - np.nanmean(channel)))],
        }
        results.append(pd.DataFrame(info))
    results = pd.concat(results, axis=0)
    results = results.set_index("Channel")
    # Z-score each statistic across channels, then compute, per channel, the
    # proportion of statistics on which it exceeds the outlier cutoff.
    z = standardize(results)
    results["Bad"] = (z.abs() > scipy.stats.norm.ppf(distance_threshold)).sum(axis=1) / len(
        results.columns
    )
    # A channel is "bad" if it is an outlier on at least `bad_threshold` of the statistics.
    bads = ch_names[np.where(results["Bad"] >= bad_threshold)[0]]
    if show:
        _plot_eeg_badchannels(eeg, bads, ch_names)
    return list(bads), results
def _plot_eeg_badchannels(eeg, bads, ch_names):
    """Plot all EEG channels, highlighting bad channels in color.

    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of EEG data.
    bads : list
        Names of the channels classified as bad.
    ch_names : np.ndarray
        Names of all channels, aligned with the rows of ``eeg``.

    Returns
    -------
    matplotlib.figure.Figure
        The figure containing the plot.
    """
    # Prepare plot
    fig, ax = plt.subplots()
    fig.suptitle("Individual EEG channels")
    ax.set_ylabel("Voltage (V)")
    ax.set_xlabel("Samples")
    # Map bad channel names to their row indices in `eeg`.
    bads_list = []
    for bad in bads:
        channel_index = np.where(ch_names == bad)[0]
        bads_list.append(channel_index[0])
    # Prepare colors for plotting (good channels in greys, bad in warm colors).
    colors_good = plt.cm.Greys(np.linspace(0, 1, len(eeg)))
    colors_bad = plt.cm.autumn(np.linspace(0, 1, len(bads)))
    # Plot good channels
    for i in range(len(eeg)):
        if i not in bads_list:
            channel = eeg[i, :]
            ax.plot(np.arange(1, len(channel) + 1), channel, c=colors_good[i])
    # Plot bad channels
    for i, bad in enumerate(bads_list):
        channel = eeg[bad, :]
        # Fix: label with the bad channel's own name (ch_names[bad]); the
        # original used ch_names[i], which indexed by position in the bads
        # list and so showed unrelated channel names in the legend.
        ax.plot(np.arange(1, len(channel) + 1), channel, c=colors_bad[i], label=ch_names[bad])
    ax.legend(loc="upper right")
    return fig
| 4,410 | 32.930769 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_source.py | def eeg_source(raw, src, bem, method="sLORETA", show=False, verbose="WARNING", **kwargs):
    """**Source Reconstruction for EEG data**
    Currently only for mne.Raw objects.
    Parameters
    ----------
    raw : mne.io.Raw
        Raw EEG data.
    src : mne.SourceSpace
        Source space. See :func:`mne_templateMRI()` to obtain it from an MRI template.
    bem : mne.Bem
        BEM model. See :func:`mne_templateMRI()` to obtain it from an MRI template.
    method : str
        Can be ``"sLORETA"``, ``"MNE"`` or ``"dSPM"``. See :func:`.mne.minimum_norm.apply_inverse_raw()`.
    show : bool
        If ``True``, shows the location of the electrodes on the head. See :func:`.mne.viz.plot_alignment()`.
    verbose : str
        Verbosity level for MNE.
    **kwargs
        Other arguments to be passed to ``mne.make_forward_solution()`` and
        ``mne.minimum_norm.make_inverse_operator()`` and ``mne.minimum_norm.apply_inverse_raw()``.
    Returns
    -------
    stc : mne.SourceEstimate
        The source estimate.
    src : mne.SourceSpaces
        The source space taken from the inverse operator.
    See Also
    --------
    mne_templateMRI
    """
    # Try loading mne
    try:
        import mne
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: mne_templateMRI(): the 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        ) from e
    # Default to MNE's built-in fsaverage transformation unless overridden.
    if "trans" not in kwargs.keys():
        trans = "fsaverage"  # MNE has a built-in fsaverage transformation
    else:
        trans = kwargs.pop("trans")
    # NOTE(review): the same **kwargs are forwarded to make_forward_solution,
    # make_inverse_operator and apply_inverse_raw below; a keyword valid for
    # only one of these would raise a TypeError in the others — confirm intended.
    # Setup source space and compute forward
    fwd = mne.make_forward_solution(
        raw.info, trans=trans, src=src, bem=bem, verbose=verbose, **kwargs
    )
    # Get noise covariance matrix
    noise_cov = mne.compute_raw_covariance(raw, tmin=0, tmax=None)
    # Get inverse solution
    inverse_operator = mne.minimum_norm.make_inverse_operator(
        raw.info, fwd, noise_cov, verbose=verbose, **kwargs
    )
    src = inverse_operator["src"]
    snr = 1.0  # use smaller SNR for raw data
    # Compute inverse solution
    stc = mne.minimum_norm.apply_inverse_raw(
        raw,
        inverse_operator,
        lambda2=1.0 / snr ** 2,
        method=method,  # sLORETA method (could also be MNE or dSPM)
        verbose=verbose,
        **kwargs
    )
    # Plot
    if show is True:
        # Check that the locations of EEG electrodes is correct with respect to MRI
        # requires PySide2, ipyvtklink and mayavi
        mne.viz.plot_alignment(
            raw.info,
            src=src,
            eeg=["original", "projected"],
            trans=trans,
            mri_fiducials=True,
            dig="fiducials",
            verbose=verbose,
        )
    return stc, src
| 2,658 | 31.036145 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/mne_crop.py | import numpy as np
def mne_crop(raw, tmin=0.0, tmax=None, include_tmax=True, smin=None, smax=None):
    """**Crop mne.Raw objects**
    This function is similar to ``raw.crop()`` (same arguments), but with a few critical differences:
    * It recreates a whole new Raw object, and as such drops all information pertaining to the
      original data (which MNE keeps, see https://github.com/mne-tools/mne-python/issues/9759).
    * There is the possibility of specifying directly the first and last samples (instead of in
      time unit).
    Parameters
    -----------
    raw : mne.io.Raw
        Raw EEG data.
    tmin : float
        See :func:`mne.Raw.crop()`.
    tmax : float
        See :func:`mne.Raw.crop()`.
    include_tmax : float
        See :func:`mne.Raw.crop()`.
    smin : int
        Cropping start in samples. Takes precedence over ``tmin``.
    smax : int
        Cropping end in samples. Takes precedence over ``tmax``.
    Returns
    -------
    mne.io.Raw
        a cropped mne.Raw object.
    Examples
    ---------
    .. ipython:: python
      import neurokit2 as nk
      raw = nk.mne_data(what="raw")
      raw_cropped = nk.mne_crop(raw, smin=200, smax=1200, include_tmax=False)
      len(raw_cropped)
    """
    # Try loading mne
    try:
        import mne
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: eeg_channel_add(): the 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        ) from e
    # Convert time to samples (only needed when a sample bound is not given directly)
    if smin is None or smax is None:
        max_time = (raw.n_times - 1) / raw.info["sfreq"]
        if tmax is None:
            tmax = max_time
        # Validate the time bounds before converting them to samples.
        if tmin > tmax:
            raise ValueError(f"tmin ({tmin}) must be less than tmax ({tmax})")
        if tmin < 0.0:
            raise ValueError(f"tmin ({tmin}) must be >= 0")
        elif tmax > max_time:
            raise ValueError(
                f"tmax ({tmax}) must be less than or equal to the max time ({max_time} sec)."
            )
        # Convert time to first and last samples
        new_smin, new_smax = np.where(
            _time_mask(raw.times, tmin, tmax, sfreq=raw.info["sfreq"], include_tmax=include_tmax)
        )[0][[0, -1]]
        if smin is None:
            smin = new_smin
        if smax is None:
            smax = new_smax
        if include_tmax:
            smax += 1
    # Re-create the Raw object (note that mne does smin : smin + 1)
    raw = mne.io.RawArray(raw._data[:, int(smin) : int(smax)].copy(), raw.info, verbose="WARNING")
    return raw
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True, include_tmax=True):
"""Copied from https://github.com/mne-tools/mne-python/mne/utils/numerics.py#L466."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
include_tmax = True # ignore this param when tmax is infinite
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq
tmax += (0.5 if include_tmax else -0.5) / sfreq
else:
assert include_tmax # can only be used when sfreq is known
if raise_error and tmin > tmax:
raise ValueError(f"tmin ({orig_tmin}) must be less than or equal to tmax ({orig_tmax})")
mask = times >= tmin
mask &= times <= tmax
if raise_error and not mask.any():
extra = "" if include_tmax else "when include_tmax=False "
raise ValueError(
f"No samples remain when using tmin={orig_tmin} and tmax={orig_tmax} {extra}"
"(original time bounds are [{times[0]}, {times[-1]}])"
)
return mask
| 4,041 | 32.966387 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_gfp.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..signal import signal_filter
from ..stats import mad, standardize
def eeg_gfp(
    eeg,
    sampling_rate=None,
    method="l1",
    normalize=False,
    smooth=0,
    robust=False,
    standardize_eeg=False,
):
    """**Global Field Power (GFP)**

    Compute the Global Field Power, a reference-independent measure of
    response strength first introduced by Lehmann and Skrandies (1980).
    Mathematically, GFP is the standard deviation of all electrodes at a
    given time point.

    Parameters
    ----------
    eeg : array
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
        Only necessary if smoothing is requested.
    method : str
        Can be either ``l1`` or ``l2`` to use the L1 or L2 norm.
    normalize : bool
        If True, rescale the GFP to a 0-1 range.
    smooth : float
        Can be either ``None`` or a float. If a float, will use this value,
        multiplied by the sampling rate, as the smoothing window.
    robust : bool
        If ``True``, the GFP extraction (and the data standardization if
        requested) will be done using the median/MAD instead of the mean/SD.
    standardize_eeg : bool
        If True, standardize (z-score) the data across time prior to GFP
        extraction using ``nk.standardize()``.

    Returns
    -------
    gfp : array
        The global field power of each sample point in the data.

    References
    ----------
    * Lehmann, D., & Skrandies, W. (1980). Reference-free identification of
      components of checkerboard-evoked multichannel potential fields.
      Electroencephalography and clinical neurophysiology, 48(6), 609-621.
    """
    # MNE objects expose their sampling frequency and data array directly.
    if not isinstance(eeg, (pd.DataFrame, np.ndarray)):
        sampling_rate = eeg.info["sfreq"]
        eeg = eeg.get_data()

    # Optional z-scoring of the channels across time.
    if standardize_eeg is True:
        eeg = standardize(eeg, robust=robust)

    # Extract the GFP with the requested norm.
    if method.lower() == "l1":
        gfp = _eeg_gfp_L1(eeg, robust=robust)
    else:
        gfp = _eeg_gfp_L2(eeg, robust=robust)

    # Rescale between 0 and 1.
    if normalize is True:
        gfp = gfp / np.max(gfp)

    # Optionally smooth the resulting curve.
    if smooth is not None and smooth != 0:
        gfp = _eeg_gfp_smoothing(gfp, sampling_rate=sampling_rate, window_size=smooth)

    return gfp
# =============================================================================
# Utilities
# =============================================================================
def _eeg_gfp_smoothing(gfp, sampling_rate=None, window_size=0.02):
    """Smooth the Global Field Power curve with a Savitzky-Golay filter."""
    if sampling_rate is None:
        raise ValueError(
            "NeuroKit error: eeg_gfp(): You requested to smooth the GFP, for which ",
            "we need to know the sampling_rate. Please provide it as an argument.",
        )

    # Window length in samples; windows of 2 samples or fewer are left unsmoothed.
    n_samples = int(window_size * sampling_rate)
    if n_samples > 2:
        gfp = signal_filter(gfp, method="savgol", order=2, window_size=n_samples)

    return gfp
# =============================================================================
# Methods
# =============================================================================
def _eeg_gfp_L1(eeg, robust=False):
if robust is False:
gfp = np.sum(np.abs(eeg - np.mean(eeg, axis=0)), axis=0) / len(eeg)
else:
gfp = np.sum(np.abs(eeg - np.median(eeg, axis=0)), axis=0) / len(eeg)
return gfp
def _eeg_gfp_L2(eeg, robust=False):
if robust is False:
gfp = np.std(eeg, axis=0, ddof=0)
else:
gfp = mad(eeg, axis=0)
return gfp
| 4,869 | 29.061728 | 102 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/mne_data.py | # -*- coding: utf-8 -*-
def mne_data(what="raw", path=None):
    """**Access MNE Datasets**
    Utility function to easily access MNE datasets.
    Parameters
    -----------
    what : str
        Can be ``"raw"``, ``"filt-0-40_raw"`` (a filtered version), ``"epochs"``,
        or ``"evoked"``.
    path : str
        Defaults to ``None``, assuming that the MNE data folder already exists. If not,
        specify the directory to download the folder.
    Returns
    -------
    object
        The raw mne object (or a list of Evoked objects when ``what="evoked"``).
    Examples
    ---------
    .. ipython:: python
      import neurokit2 as nk
      raw = nk.mne_data(what="raw")
      raw = nk.mne_data(what="epochs")
    """
    # Try loading mne
    try:
        import mne
    except ImportError:
        raise ImportError(
            "NeuroKit error: mne_data(): the 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        )
    # Silence MNE output for the duration of this function; restored at the end.
    old_verbosity_level = mne.set_log_level(verbose="WARNING", return_old_level=True)
    # Find path of mne data
    if path is None:
        try:
            path = str(mne.datasets.sample.data_path())
        except ValueError:
            raise ValueError(
                "NeuroKit error: the mne sample data folder does not exist. ",
                "Please specify a path to download the mne datasets.",
            )
    # Raw
    if what in ["raw", "filt-0-40_raw"]:
        path += "/MEG/sample/sample_audvis_" + what + ".fif"
        data = mne.io.read_raw_fif(path, preload=True)
        data = data.pick_types(meg=False, eeg=True)
    # Epochs
    elif what in ["epochs", "evoked"]:
        raw = mne.io.read_raw_fif(path + "/MEG/sample/sample_audvis_filt-0-40_raw.fif").pick_types(
            meg=False, eeg=True
        )
        events = mne.read_events(path + "/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif")
        event_id = {"audio/left": 1, "audio/right": 2, "visual/left": 3, "visual/right": 4}
        data = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, baseline=(None, 0))
        # Evoked: average the epochs per condition family.
        if what in ["evoked"]:
            data = [data[name].average() for name in ("audio", "visual")]
    else:
        raise ValueError("NeuroKit error: mne_data(): the 'what' argument not recognized.")
    mne.set_log_level(old_verbosity_level)
    return data
| 2,308 | 28.602564 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_rereference.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def eeg_rereference(eeg, reference="average", robust=False, **kwargs):
    """**EEG Rereferencing**

    Re-express the voltage at the EEG scalp channels with respect to a new
    reference. This function can be used for arrays as well as MNE objects.

    EEG recordings measure differences in electrical potentials between two
    points: the signal displayed at any channel is in fact the difference in
    electrical potential to some other recording site (primarily the ground
    electrode, which also picks up noise). Re-referencing expresses each
    channel relative to another reference, composed of any recorded channel
    or an average of several channels.

    Parameters
    -----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    reference : str
        See :func:`.mne.set_eeg_reference()`. Can be a string (e.g., 'average',
        'lap' for Laplacian "reference-free" transformation, i.e., CSD), or a
        list (e.g., ['TP9', 'TP10'] for mastoid reference).
    robust : bool
        Only applied if reference is ``average``. If ``True``, will subtract
        the median instead of the mean.
    **kwargs
        Optional arguments to be passed into ``mne.set_eeg_rereference()``.

    Returns
    -------
    object
        The rereferenced array or raw mne object.

    References
    -----------
    * Trujillo, L. T., Stanfield, C. T., & Vela, R. D. (2017). The effect of
      electroencephalogram (EEG) reference choice on information-theoretic
      measures of the complexity and integration of EEG signals. Frontiers
      in Neuroscience, 11, 425.
    """
    # Arrays and DataFrames are handled in-house; anything else is assumed
    # to be an MNE object and delegated to MNE's own machinery.
    if isinstance(eeg, (pd.DataFrame, np.ndarray)):
        return eeg_rereference_array(eeg, reference=reference, robust=robust)
    return eeg_rereference_mne(eeg, reference=reference, robust=robust, **kwargs)
# =============================================================================
# Methods
# =============================================================================
def eeg_rereference_array(eeg, reference="average", robust=False):
    """Rereference a (channels, times) array to the (robust) average."""
    if reference != "average":
        raise ValueError(
            "NeuroKit error: eeg_rereference(): Only 'average' rereferencing",
            " is supported for data arrays for now.",
        )

    # Subtract the across-channel central tendency at each time point.
    center = np.median if robust else np.mean
    return eeg - center(eeg, axis=0, keepdims=True)
def eeg_rereference_mne(eeg, reference="average", robust=False, **kwargs):
    """Rereference an MNE object, via MNE's own machinery.

    Supports the standard references accepted by ``mne.set_eeg_reference()``
    plus 'lap'/'csd' for current source density. A robust (median) average
    is not supported for MNE objects.
    """
    # Work on a copy so the caller's object is left untouched.
    eeg = eeg.copy()
    if reference == "average" and robust is True:
        # Assigning "custom_ref_applied" to True throws an error with the
        # latest MNE. If this error goes away in the future, we might able to
        # restore this feature.
        # > eeg._data = eeg_rereference_array(eeg._data, reference=reference, robust=robust)
        # > eeg.info["custom_ref_applied"] = True
        raise ValueError(
            "NeuroKit error: eeg_rereference(): 'robust=True' currently not supported for MNE",
            " objects.",
        )
    elif reference in ["lap", "csd"]:
        try:
            import mne
            # NOTE(review): this is a lexicographic string comparison, which
            # misorders some versions (e.g. "0.9" > "0.20") — confirm intended.
            if mne.__version__ < "0.20":
                raise ImportError
        except ImportError as e:
            raise ImportError(
                "NeuroKit error: eeg_rereference(): the 'mne' module (version > 0.20) is required "
                "for this function to run. Please install it first (`pip install mne`).",
            ) from e
        old_verbosity_level = mne.set_log_level(verbose="WARNING", return_old_level=True)
        eeg = mne.preprocessing.compute_current_source_density(eeg, **kwargs)
        # Reconvert CSD type to EEG (https://github.com/mne-tools/mne-python/issues/11426)
        # channels = np.array(eeg.ch_names)[mne.pick_types(eeg.info, csd=True)]
        # eeg.set_channel_types(dict(zip(channels, ["eeg"] * len(channels))))
        mne.set_log_level(old_verbosity_level)
    else:
        eeg = eeg.set_eeg_reference(reference, verbose=False, **kwargs)
    return eeg
| 5,622 | 35.512987 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_1f.py | # -*- coding: utf-8 -*-
# import numpy as np
# import pandas as pd
# def mne_channel_extract(raw):
# """1/f Neural Noise
# Extract parameters related to the 1/f structure of the EEG power spectrum.
# Parameters
# ----------
# raw : mne.io.Raw
# Raw EEG data.
# Returns
# ----------
# DataFrame
# A DataFrame or Series containing the channel(s).
# Example
# ----------
# >>> import neurokit2 as nk
# >>> import mne
# >>>
# >>> raw = nk.mne_data("raw")
# >>>
# """
# import mne
# import neurokit2 as nk
# raw = nk.mne_data("raw")
# raw.plot_psd(fmin=0, fmax=40.0, picks=["EEG 050"])
# channel = nk.mne_channel_extract(raw, what=["EEG 050"]).values
# psd = nk.signal_psd(
# channel, sampling_rate=raw.info["sfreq"], show=True, max_frequency=40, method="multitapers"
# )
# plt.loglog(psd["Frequency"], psd["Power"])
# plt.plot(psd["Frequency"], np.log(psd["Power"]))
| 996 | 22.738095 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/utils.py | import numpy as np
import pandas as pd
from .mne_to_df import mne_to_df
def _sanitize_eeg(eeg, sampling_rate=None, time=None):
"""Convert to DataFrame
Input can be an array (channels, time), or an MNE object.
Examples
---------
>>> import neurokit2 as nk
>>>
>>> # Raw objects
>>> eeg = nk.mne_data("raw")
"""
# If array (channels, time), tranpose and convert to DataFrame
if isinstance(eeg, np.ndarray):
eeg = pd.DataFrame(eeg.T)
eeg.columns = [f"EEG_{i}" for i in range(eeg.shape[1])]
# If dataframe
if isinstance(eeg, pd.DataFrame):
return eeg, sampling_rate, time
# Probably an mne object
else:
sampling_rate = eeg.info["sfreq"]
eeg = mne_to_df(eeg)
time = eeg["Time"].values
eeg = eeg.drop(columns=["Time"])
return eeg, sampling_rate, time
| 874 | 22.648649 | 66 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_source_extract.py | import pandas as pd
def eeg_source_extract(stc, src, segmentation="PALS_B12_Lobes", verbose="WARNING", **kwargs):
    """**Extract the activity from an anatomical source**
    Returns a dataframe with the activity from each source in the segmentation.
    Parcellation models include:
    * 'aparc'
    * 'aparc.a2005s'
    * 'aparc.a2009s'
    * 'oasis.chubs'
    * 'PALS_B12_Brodmann'
    * 'PALS_B12_Lobes'
    * 'PALS_B12_OrbitoFrontal'
    * 'PALS_B12_Visuotopic'
    * 'Yeo2011_17Networks_N1000'
    * 'Yeo2011_7Networks_N1000'
    Parameters
    ----------
    stc : mne.SourceEstimate
        An SourceEstimate object as obtained by ``eeg_source()``.
    src : mne.SourceSpaces
        An SourceSpaces object as obtained by ``eeg_source()``.
    segmentation : str
        See above.
    verbose : str
        Verbosity level for MNE.
    **kwargs
        Other arguments to be passed to ``mne.extract_label_time_course()``.
    Returns
    -------
    pd.DataFrame
        One column per (non-empty, labelled) region in the parcellation,
        rows being time points.
    Examples
    ---------
    .. ipython:: python
      :verbatim:
      import neurokit2 as nk
      raw = nk.mne_data("filt-0-40_raw")
      src, bem = nk.mne_templateMRI()
      stc, src = nk.eeg_source(raw, src, bem)
      data = nk.eeg_source_extract(stc, src, segmentation="PALS_B12_Lobes")
      data.head()
    """
    # Try loading mne
    try:
        import mne
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: mne_templateMRI(): the 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        ) from e
    # Read the parcellation labels from the fsaverage template annotations.
    labels = mne.read_labels_from_annot(
        subject="fsaverage",
        parc=segmentation,
        subjects_dir=str(mne.datasets.sample.data_path()) + "/subjects",
        verbose=verbose,
    )
    # Filter empty ones
    labels = [lab for lab in labels if len(lab) > 0]
    # Filter Unknown ones
    labels = [lab for lab in labels if "?" not in lab.name]
    # Average the source activity within each label over time.
    tcs = stc.extract_label_time_course(
        labels,
        src=src,
        verbose=verbose,
        **kwargs,
    )
    return pd.DataFrame(tcs.T, columns=[lab.name for lab in labels])
| 2,135 | 25.7 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_diss.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .eeg_gfp import eeg_gfp
def eeg_diss(eeg, gfp=None, **kwargs):
    """**Global dissimilarity (DISS)**

    Global dissimilarity (DISS) indexes configuration differences between
    two electric fields, independent of their strength. Like GFP, DISS was
    first introduced by Lehmann and Skrandies (1980). It equals the square
    root of the mean of the squared differences between the potentials
    measured at each electrode (versus the average reference), each first
    scaled to unitary strength by dividing by the instantaneous GFP.

    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    gfp : list
        The Global Field Power (GFP). If ``None``, will be obtained via ``eeg_gfp()``.
    **kwargs
        Optional arguments to be passed into ``nk.eeg_gfp()``.

    Returns
    -------
    np.ndarray
        DISS of each sample point in the data.

    References
    ----------
    * Lehmann, D., & Skrandies, W. (1980). Reference-free identification of
      components of checkerboard-evoked multichannel potential fields.
      Electroencephalography and clinical neurophysiology, 48(6), 609-621.
    """
    # MNE objects expose their data array directly.
    if not isinstance(eeg, (pd.DataFrame, np.ndarray)):
        eeg = eeg.get_data()

    if gfp is None:
        gfp = eeg_gfp(eeg, **kwargs)

    # Scale each time point to unitary strength before comparing topographies.
    scaled = eeg / gfp
    diss = np.mean(np.diff(scaled, axis=1) ** 2, axis=0)

    # Pad with a leading zero so the output matches the input length.
    return np.insert(diss, 0, 0, axis=0)
| 2,026 | 27.957143 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/__init__.py | """Submodule for NeuroKit."""
from .eeg_badchannels import eeg_badchannels
from .eeg_diss import eeg_diss
from .eeg_gfp import eeg_gfp
from .eeg_power import eeg_power
from .eeg_rereference import eeg_rereference
from .eeg_simulate import eeg_simulate
from .eeg_source import eeg_source
from .eeg_source_extract import eeg_source_extract
from .mne_channel_add import mne_channel_add
from .mne_channel_extract import mne_channel_extract
from .mne_crop import mne_crop
from .mne_data import mne_data
from .mne_templateMRI import mne_templateMRI
from .mne_to_df import mne_to_df, mne_to_dict
# Public API of the EEG submodule (names exported by `from neurokit2.eeg import *`)
__all__ = [
    "mne_data",
    "mne_channel_add",
    "mne_channel_extract",
    "mne_crop",
    "mne_to_df",
    "mne_to_dict",
    "mne_templateMRI",
    "eeg_simulate",
    "eeg_source",
    "eeg_source_extract",
    "eeg_power",
    "eeg_rereference",
    "eeg_gfp",
    "eeg_diss",
    "eeg_badchannels",
]
| 904 | 24.857143 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/mne_templateMRI.py | import os
def mne_templateMRI(verbose="WARNING"):
    """**Return Path of MRI Template**

    This function is a helper that returns the path of the MRI template for adults (the
    ``src`` and the ``bem``) that is made available through ``"MNE"``. It downloads the data
    if need be. These templates can be used for EEG source reconstruction when no individual
    MRI is available.

    See https://mne.tools/stable/auto_tutorials/forward/35_eeg_no_mri.html

    Parameters
    ----------
    verbose : str
        Verbosity level for MNE.

    Returns
    -------
    src : str
        Path to the fsaverage source space file.
    bem : str
        Path to the fsaverage BEM solution file.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      src, bem = nk.mne_templateMRI()

    """
    # Try loading mne (requires also the 'pooch' package)
    try:
        import mne
    except ImportError as e:
        # One concatenated string: passing several arguments to ImportError stores them
        # as an args tuple instead of producing a single readable message.
        raise ImportError(
            "NeuroKit error: mne_templateMRI(): the 'mne' module is required for this "
            "function to run. Please install it first (`pip install mne`)."
        ) from e

    # Download the fsaverage files if not already present
    fs_dir = mne.datasets.fetch_fsaverage(verbose=verbose)

    # The template files live in the 'bem' subdirectory
    src = os.path.join(fs_dir, "bem", "fsaverage-ico-5-src.fif")
    bem = os.path.join(fs_dir, "bem", "fsaverage-5120-5120-5120-bem-sol.fif")
    return src, bem
| 1,277 | 28.72093 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/mne_channel_extract.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def mne_channel_extract(raw, what, name=None, add_firstsamples=False):
    """**Channel extraction from MNE objects**

    Select one or several channels by name and return them in a DataFrame (several
    channels) or a Series (single channel).

    Parameters
    ----------
    raw : mne.io.Raw
        Raw EEG data.
    what : str or list
        Can be ``"MEG"``, ``"EEG"`` or ``"EOG"`` to extract all channels whose names
        contain that prefix (e.g., 'EEG 001', 'EOG 061'), a single exact channel name,
        or a list of exact channel names (e.g., ``['124', '125']``).
    name : str or list
        Replacement name(s) for the extracted channel(s): a str for a single channel, a
        list for several. Defaults to ``None`` (keep original names).
    add_firstsamples : bool, str, int or float
        MNE Raw objects store a delay between the start of the system and the start of the
        recording (``raw.first_samp``). If ``True``, that offset is prepended to the output
        filled with NaNs; if a number, filled with that value; if a str (``"backfill"``),
        filled with the first real value. Defaults to ``False`` (no prepend).

    Returns
    ----------
    DataFrame
        A DataFrame or Series containing the channel(s).

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import mne

      raw = nk.mne_data("raw")

      raw_channel = nk.mne_channel_extract(raw, what=["EEG 060", "EEG 055"], name=['060', '055'])
      eeg_channels = nk.mne_channel_extract(raw, "EEG")

    """
    channels_all = raw.copy().info["ch_names"]

    # Select a category of channels: keep every channel whose name contains the prefix
    if what in ["EEG", "EOG", "MEG"]:
        what = [x for x in channels_all if what in x]
    # Select a single specified channel
    elif isinstance(what, str):
        what = [what]
    # Select a few specified channels (all must exist)
    elif isinstance(what, list):
        if not all(x in channels_all for x in what):
            raise ValueError(
                "NeuroKit error: mne_channel_extract(): List of channels not found. Please "
                "check channel names in raw.info['ch_names']. "
            )

    # Slicing a picked Raw yields (data, times); only the data array is kept
    channels, __ = raw.copy().pick_channels(what, ordered=False)[:]
    if len(what) > 1:
        # Several channels -> DataFrame of shape (times, channels)
        channels = pd.DataFrame(channels.T, columns=what)
        if name is not None:
            channels.columns = name
    else:
        # Single channel -> Series; the original name is kept on the `.what` attribute
        channels = pd.Series(channels[0])
        channels.what = what[0]
        if name is not None:
            channels = channels.rename(name)

    # Add first_samp offset in front of the extracted data
    if isinstance(add_firstsamples, bool) and add_firstsamples is True:  # fill with NaN
        add_firstsamples = np.nan
    if isinstance(add_firstsamples, str):  # back-fill with the first real value
        add_firstsamples = channels.iloc[0]
        if isinstance(channels, pd.DataFrame):
            # One fill value per column (column-name -> first value mapping)
            add_firstsamples = dict(add_firstsamples)
    if add_firstsamples is not False:
        # Prepend `raw.first_samp` rows with negative indices (-first_samp .. -1)
        if isinstance(channels, pd.Series):
            fill = pd.Series(add_firstsamples, index=range(-raw.first_samp, 0))
            channels = pd.concat([fill, channels], axis=0)
        elif isinstance(channels, pd.DataFrame):
            fill = pd.DataFrame(
                add_firstsamples, index=range(-raw.first_samp, 0), columns=channels.columns
            )
            channels = pd.concat([fill, channels], axis=0)

    return channels
| 3,844 | 38.639175 | 110 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_simulate.py | import numpy as np
from ..misc import check_random_state
def eeg_simulate(duration=1, length=None, sampling_rate=1000, noise=0.1, random_state=None):
    """**EEG Signal Simulation**

    Simulate an artificial EEG signal. This is a crude implementation based on the
    MNE-Python raw simulation example: sparse dipole activity is projected through a
    template forward solution and noise is added. Help is needed to improve this function.

    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    length : int
        The desired length of the signal (in samples). Derived from ``duration`` if ``None``.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    noise : float
        Noise level.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See ``misc.check_random_state`` for details.

    Returns
    -------
    array
        The first EEG channel of the simulation, cropped to the requested length.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.eeg_simulate(duration=3, sampling_rate=500, noise=0.2)

      @savefig p_eeg_simulate1.png scale=100%
      _ = nk.signal_psd(eeg, sampling_rate=500, show=True, max_frequency=100)
      @suppress
      plt.close()

    """
    # Try loading mne
    try:
        import mne
        import mne.datasets
        import mne.simulation
    except ImportError as e:
        # NOTE(review): two arguments make ImportError carry an args tuple instead of one
        # message — consider concatenating into a single string.
        raise ImportError(
            "The 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        ) from e

    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)

    # Derive whichever of length/duration was not specified from the other
    if length is None:
        length = duration * sampling_rate
    if duration is None:
        duration = length / sampling_rate

    # Paths to the MNE sample dataset (downloaded on first use)
    path = mne.datasets.sample.data_path() / "MEG" / "sample"
    raw_file = path / "sample_audvis_raw.fif"
    fwd_file = path / "sample_audvis-meg-eeg-oct-6-fwd.fif"

    # Load real data as the template (channel layout, measurement info, etc.)
    raw = mne.io.read_raw_fif(raw_file, preload=True, verbose=False)
    raw = raw.set_eeg_reference(projection=True, verbose=False)

    n_dipoles = 4  # number of dipoles to create

    def data_fun(times, n_dipoles=4):
        """Generate time-staggered sinusoids at harmonics of 10Hz"""
        # NOTE(review): `n` restarts at 0 on every call, so each dipole receives the same
        # (first) window and harmonic — the MNE example keeps a counter across calls.
        # Confirm whether the "time-staggered" behavior is actually intended here.
        n = 0  # harmonic number
        n_samp = len(times)
        window = np.zeros(n_samp)
        start, stop = [int(ii * float(n_samp) / (2 * n_dipoles)) for ii in (2 * n, 2 * n + 1)]
        window[start:stop] = 1.0
        n += 1
        data = 25e-9 * np.sin(2.0 * np.pi * 10.0 * n * times)
        data *= window
        return data

    # Use 2 seconds of the template's time axis as the source time course
    times = raw.times[: int(raw.info["sfreq"] * 2)]
    fwd = mne.read_forward_solution(fwd_file, verbose=False)
    stc = mne.simulation.simulate_sparse_stc(
        fwd["src"],
        n_dipoles=n_dipoles,
        times=times,
        data_fun=data_fun,
        random_state=rng,
    )

    # Repeat the 2-second source activation enough times to cover the requested duration
    raw_sim = mne.simulation.simulate_raw(raw.info, [stc] * int(np.ceil(duration / 2)), forward=fwd, verbose=False)
    cov = mne.make_ad_hoc_cov(raw_sim.info, std=noise / 1000000)
    raw_sim = mne.simulation.add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], verbose=False, random_state=rng)

    # Resample to the requested sampling rate
    raw_sim = raw_sim.resample(sampling_rate, verbose=False)

    # Add artifacts
    # mne.simulation.add_ecg(raw_sim, verbose=False)
    # mne.simulation.add_eog(raw_sim, verbose=False)

    # Return the first EEG channel, cropped to the requested number of samples
    eeg = raw_sim.pick_types(eeg=True, verbose=False).get_data()
    return eeg[0, 0 : int(duration * sampling_rate)]
| 3,600 | 32.654206 | 115 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/mne_to_df.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def mne_to_df(eeg):
    """**Conversion from MNE to dataframes**

    Convert an MNE object (Raw, Epochs, or a list of Evoked) to a pandas DataFrame.
    Epoch data are identifiable by the ``"Epoch"`` column; the time axis is stored in
    the ``"Time"`` column.

    Parameters
    ----------
    eeg : Union[mne.io.Raw, mne.Epochs]
        Raw or Epochs M/EEG data from MNE.

    Returns
    ----------
    DataFrame
        A DataFrame representation of the MNE object.

    See Also
    --------
    .mne_to_dict

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.mne_data("filt-0-40_raw")
      nk.mne_to_df(eeg)

    """
    # All the conversion logic is shared with mne_to_dict()
    return _mne_convert(eeg, to_what="df")
# Dict
def mne_to_dict(eeg):
    """**Convert MNE Raw or Epochs object to a dictionary**

    Parameters
    ----------
    eeg : Union[mne.io.Raw, mne.Epochs]
        Raw or Epochs M/EEG data from MNE.

    Returns
    ----------
    dict
        For Raw objects, a dict of column -> list of values (time axis under ``"Time"``);
        for Epochs objects, a dict of DataFrames keyed by epoch; for Evoked lists, a dict
        of DataFrames keyed by condition.

    See Also
    --------
    mne_to_df

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk
      import mne

      eeg = nk.mne_data("filt-0-40_raw")
      nk.mne_to_dict(eeg)

    """
    # Delegate to the shared converter used by mne_to_df()
    return _mne_convert(eeg, to_what="dict")
# =============================================================================
# Main function
# =============================================================================
def _mne_convert(eeg, to_what="df"):
# Try loading mne
try:
import mne
except ImportError as e:
raise ImportError(
"NeuroKit error: eeg_add_channel(): the 'mne' module is required for this function to run. ",
"Please install it first (`pip install mne`).",
) from e
old_verbosity_level = mne.set_log_level(verbose="WARNING", return_old_level=True)
# If raw object
if isinstance(eeg, (mne.io.Raw, mne.io.RawArray)):
data = eeg.to_data_frame(time_format=None)
data = data.rename(columns={"time": "Time"})
if to_what == "dict":
data = data.to_dict(orient="list")
# If epoch object
elif isinstance(eeg, mne.Epochs):
data = eeg.to_data_frame(time_format=None)
data = data.rename(columns={"time": "Time", "condition": "Condition", "epoch": "Epoch"})
if to_what == "dict":
out = {}
for epoch in data["Epoch"].unique():
out[epoch] = data[data["Epoch"] == epoch]
data = out
# If dataframe, skip and return
elif isinstance(eeg, pd.DataFrame):
data = eeg
if to_what == "dict":
data = data.to_dict(orient="list")
# If dataframe, skip and return
elif isinstance(eeg, np.ndarray):
data = pd.DataFrame(eeg)
if to_what == "dict":
data = data.to_dict(orient="list")
# it might be an evoked object
else:
dfs = []
for i, evoked in enumerate(eeg):
data = evoked.to_data_frame(time_format=None)
data = data.rename(columns={"time": "Time"})
data.insert(1, "Condition", evoked.comment)
dfs.append(data)
data = pd.concat(dfs, axis=0)
if to_what == "dict":
out = {}
for condition in data["Condition"].unique():
out[condition] = data[data["Condition"] == condition]
data = out
mne.set_log_level(old_verbosity_level)
return data
| 4,091 | 24.416149 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/mne_channel_add.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def mne_channel_add(
    raw, channel, channel_type=None, channel_name=None, sync_index_raw=0, sync_index_channel=0
):
    """**Add channel as array to MNE**

    Add a channel to an mne Raw m/eeg object. The channel is synchronized to the EEG data
    via a pair of alignment indices, padded/cropped to the raw data's length, and appended.

    Parameters
    ----------
    raw : mne.io.Raw
        Raw EEG data from MNE.
    channel : list or array
        The signal to be added.
    channel_type : str
        Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc',
        'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
    channel_name : str
        Desired channel name. Defaults to the Series name (if any) or "Added_Channel".
    sync_index_raw : int or list
        An index (e.g., the onset of the same event marked in both signals) in the raw
        data by which to align the two inputs.
    sync_index_channel : int or list
        The corresponding index in the channel to add.

    Returns
    ----------
    mne.io.Raw
        Raw data in FIF format.

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import mne

      raw = nk.mne_data("filt-0-40_raw")
      ecg = nk.ecg_simulate(length=50000)

      # Let the 42nd sample point in the EEG signal correspond to the 333rd point in the ECG
      raw = nk.mne_channel_add(raw,
                               ecg,
                               sync_index_raw=42,
                               sync_index_channel=333,
                               channel_type="ecg")

    """
    # Try loading mne
    try:
        import mne
    except ImportError as e:
        # One concatenated string (multiple args would be stored as a tuple), and chain
        # the original exception for consistency with the sibling functions.
        raise ImportError(
            "NeuroKit error: eeg_channel_add(): the 'mne' module is required for this "
            "function to run. Please install it first (`pip install mne`)."
        ) from e

    # Infer a channel name if none was given
    if channel_name is None:
        if isinstance(channel, pd.Series) and channel.name is not None:
            channel_name = channel.name
        else:
            channel_name = "Added_Channel"

    # Offset between the two synchronization indices
    diff = sync_index_channel - sync_index_raw

    # Shift the channel so its sync point lands on the raw data's sync point
    if diff > 0:
        channel = list(channel)[diff : len(channel)]
        channel = channel + [np.nan] * diff
    if diff < 0:
        channel = [np.nan] * abs(diff) + list(channel)

    # Pad with NaNs if shorter than the raw data, or crop if longer
    if len(channel) < len(raw):
        channel = list(channel) + [np.nan] * (len(raw) - len(channel))
    else:
        channel = list(channel)[0 : len(raw)]

    old_verbosity_level = mne.set_log_level(verbose="WARNING", return_old_level=True)

    # Wrap the channel in a RawArray with the raw data's sampling rate
    info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type)
    channel = mne.io.RawArray([channel], info)

    # Add the channel to a copy of the input (add_channels modifies in place)
    raw = raw.copy()
    raw.add_channels([channel], force_update_info=True)

    # Restore old verbosity level
    mne.set_log_level(old_verbosity_level)
    return raw
| 3,693 | 32.889908 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/eeg/eeg_power.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..signal import signal_power
from .utils import _sanitize_eeg
def eeg_power(
    eeg, sampling_rate=None, frequency_band=["Gamma", "Beta", "Alpha", "Theta", "Delta"], **kwargs
):
    """**EEG Power in Different Frequency Bands**

    See our `walkthrough <https://neuropsychology.github.io/NeuroKit/examples/eeg_power/eeg_power.html>`_ for
    details.

    * **Gamma** (30-80 Hz)
    * **Beta** (13-30 Hz)
    * **Beta 1** (13-16 Hz)
    * **Beta 2** (16-20 Hz)
    * **Beta 3** (20-30 Hz)
    * **SMR** (13-15 Hz)
    * **Alpha** (8-13 Hz)
    * **Mu** (9-11 Hz)
    * **Theta** (4-8 Hz)
    * **Delta** (1-4 Hz)

    Parameters
    ----------
    eeg : array
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Only necessary if
        smoothing is requested.
    frequency_band : list
        A list of frequency bands (names from the list above, or tuples of frequencies).
    **kwargs
        Other arguments to be passed to ``nk.signal_power()``.

    Returns
    -------
    pd.DataFrame
        The power in different frequency bands for each channel.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.mne_data("raw")

      by_channel = nk.eeg_power(eeg)
      by_channel.head()

      average = by_channel.mean(numeric_only=True, axis=0)
      average["Gamma"]

    """
    # Work on a copy: _eeg_power_sanitize() replaces band names with frequency tuples in
    # place, which would otherwise permanently mutate the shared default list across calls
    # (turning "Gamma" columns into "Hz_30_80" on the second call).
    frequency_band = list(frequency_band)

    # Sanitize names and values
    frequency_band, band_names = _eeg_power_sanitize(frequency_band)

    # Sanitize input
    eeg, sampling_rate, _ = _sanitize_eeg(eeg, sampling_rate=sampling_rate)

    # Compute band power for each channel
    data = [
        signal_power(
            eeg[channel].values,
            sampling_rate=sampling_rate,
            frequency_band=frequency_band,
            **kwargs,
        )
        for channel in eeg.columns
    ]

    # Assemble one row per channel
    data = pd.concat(data, axis=0)
    data.columns = band_names
    data.insert(0, "Channel", eeg.columns)
    data.reset_index(drop=True, inplace=True)
    return data
# =============================================================================
# Utilities
# =============================================================================
def _eeg_power_sanitize(frequency_band=["Gamma", "Beta", "Alpha", "Theta", "Delta"]):
band_names = frequency_band.copy() # This will used for the names
for i, f in enumerate(frequency_band):
if isinstance(f, str):
f_name = f.lower()
if f_name == "gamma":
frequency_band[i] = (30, 80)
elif f_name == "beta":
frequency_band[i] = (13, 30)
elif f_name == "beta1":
frequency_band[i] = (13, 16)
elif f_name == "beta2":
frequency_band[i] = (16, 20)
elif f_name == "beta3":
frequency_band[i] = (20, 30)
elif f_name == "smr":
frequency_band[i] = (13, 15)
elif f_name == "alpha":
frequency_band[i] = (8, 13)
elif f_name == "mu":
frequency_band[i] = (9, 11)
elif f_name == "theta":
frequency_band[i] = (4, 8)
elif f_name == "delta":
frequency_band[i] = (1, 4)
else:
raise ValueError(f"Unknown frequency band: '{f_name}'")
elif isinstance(f, tuple):
band_names[i] = f"Hz_{f[0]}_{f[1]}"
else:
raise ValueError("'frequency_band' must be a list of tuples (or strings).")
return frequency_band, band_names
| 3,963 | 30.460317 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/benchmark/benchmark_ecg.py | # -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
from ..signal import signal_period
def benchmark_ecg_preprocessing(function, ecg, rpeaks=None, sampling_rate=1000):
    """**Benchmark ECG preprocessing pipelines**

    Parameters
    ----------
    function : function
        A Python function whose first argument is the ECG signal and which accepts a
        ``sampling_rate`` argument.
    ecg : pd.DataFrame or str
        Either a loaded DataFrame of ECGs, or the path to a folder containing an
        `ECGs.csv` file.
    rpeaks : pd.DataFrame or str
        Either a loaded DataFrame of R-peak annotations, or the path to a folder
        containing an `Rpeaks.csv` file. Defaults to the same source as ``ecg``.
    sampling_rate : int
        The sampling frequency (in Hz, i.e., samples/second). Only used when ``ecg``
        and ``rpeaks`` are single vectors.

    Returns
    --------
    pd.DataFrame
        A DataFrame containing the results of the benchmarking.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Define a preprocessing routine
      def function(ecg, sampling_rate):
          signal, info = nk.ecg_peaks(ecg, method="engzeemod2012", sampling_rate=sampling_rate)
          return info["ECG_R_Peaks"]

      # Synthetic example
      ecg = nk.ecg_simulate(duration=20, sampling_rate=200)
      true_rpeaks = nk.ecg_peaks(ecg, sampling_rate=200)[1]["ECG_R_Peaks"]

      nk.benchmark_ecg_preprocessing(function, ecg, true_rpeaks, sampling_rate=200)

    """
    # If no annotations are given, assume they live in the same location as the ECGs
    if rpeaks is None:
        rpeaks = ecg

    # Strings are interpreted as folders containing the standard CSV files
    if isinstance(ecg, str):
        ecg = pd.read_csv(ecg + "/ECGs.csv")
    if isinstance(rpeaks, str):
        rpeaks = pd.read_csv(rpeaks + "/Rpeaks.csv")

    # DataFrames are treated as multi-recording databases; vectors as a single recording
    if isinstance(ecg, pd.DataFrame):
        return _benchmark_ecg_preprocessing_databases(function, ecg, rpeaks)
    return _benchmark_ecg_preprocessing(function, ecg, rpeaks, sampling_rate=sampling_rate)
# =============================================================================
# Utils
# =============================================================================
def _benchmark_ecg_preprocessing_databases(function, ecgs, rpeaks):
    """Run the benchmark on every (participant, database) recording and stack the results."""
    results = []
    for participant in ecgs["Participant"].unique():
        participant_ecgs = ecgs[ecgs["Participant"] == participant]
        for database in participant_ecgs["Database"].unique():
            # Slice out this recording's signal and annotations
            ecg_slice = participant_ecgs[participant_ecgs["Database"] == database]
            rpeaks_slice = rpeaks[
                (rpeaks["Participant"] == participant) & (rpeaks["Database"] == database)
            ]
            sampling_rate = ecg_slice["Sampling_Rate"].unique()[0]

            # Benchmark this single recording
            result = _benchmark_ecg_preprocessing(
                function, ecg_slice["ECG"].values, rpeaks_slice["Rpeaks"].values, sampling_rate
            )

            # Keep track of where the result came from
            result["Participant"] = participant
            result["Database"] = database
            results.append(result)

    return pd.concat(results)
def _benchmark_ecg_preprocessing(function, ecg, rpeak, sampling_rate=1000):
# Apply function
t0 = datetime.datetime.now()
try:
found_rpeaks = function(ecg, sampling_rate=sampling_rate)
duration = (datetime.datetime.now() - t0).total_seconds()
# In case of failure
except Exception as error: # pylint: disable=broad-except
return pd.DataFrame(
{
"Sampling_Rate": [sampling_rate],
"Duration": [np.nan],
"Score": [np.nan],
"Recording_Length": [len(ecg) / sampling_rate / 60],
"Error": str(error),
}
)
# Compare R peaks
score, error = benchmark_ecg_compareRpeaks(rpeak, found_rpeaks, sampling_rate=sampling_rate)
return pd.DataFrame(
{
"Sampling_Rate": [sampling_rate],
"Duration": [duration],
"Score": [score],
"Recording_Length": [len(ecg) / sampling_rate / 60],
"Error": error,
}
)
# =============================================================================
# Comparison methods
# =============================================================================
def benchmark_ecg_compareRpeaks(true_rpeaks, found_rpeaks, sampling_rate=250):
    """Score detected R-peaks against annotations.

    Returns the mean absolute difference between the interpolated heart-period signals
    derived from each set of peaks (lower is better), together with an error string
    ("None" when the comparison succeeded).
    """
    # Too few detections to build a meaningful period signal
    if len(found_rpeaks) <= 3:
        return np.nan, "R-peaks detected <= 3"

    # Interpolate both period series onto a common length
    length = np.max(np.concatenate([true_rpeaks, found_rpeaks]))
    interpolation = dict(
        sampling_rate=sampling_rate, desired_length=length, interpolation_method="linear"
    )
    period_true = signal_period(true_rpeaks, **interpolation)
    period_found = signal_period(found_rpeaks, **interpolation)

    return np.mean(np.abs(period_found - period_true)), "None"
| 5,347 | 33.727273 | 108 | py |
NeuroKit | NeuroKit-master/neurokit2/benchmark/benchmark_utils.py | from timeit import default_timer as timer
from wfdb.processing import compare_annotations
def benchmark_record(record, sampling_rate, annotation, tolerance, detector):
    """**Obtain detector performance for an annotated record**

    Runs ``detector`` on ``record`` and compares the detected extrema with the manual
    ``annotation``, counting a detection as correct when it falls within ``tolerance``
    milliseconds of an annotated one.

    Parameters
    ----------
    record : array
        The raw physiological record.
    sampling_rate : int
        The sampling rate of the record in Hertz.
    annotation : array
        The manual extrema annotations.
    tolerance : int
        Maximum difference in milliseconds permitted between the manual annotation and
        the annotation generated by the detector.
    detector : function
        A function that takes a physiological record as first positional argument as
        well as a ``sampling_rate`` keyword argument.

    Returns
    -------
    precision : float
        The detector's precision on the record given the tolerance.
    sensitivity : float
        The detector's sensitivity on the record given the tolerance.
    """
    detected = detector(record, sampling_rate=sampling_rate)
    comparison = compare_annotations(detected, annotation, tolerance)

    true_positives = comparison.tp
    false_positives = comparison.fp
    false_negatives = comparison.fn

    precision = true_positives / (true_positives + false_positives)
    sensitivity = true_positives / (true_positives + false_negatives)
    return precision, sensitivity
def time_record(record, sampling_rate, detector, n_runs):
    """**Obtain the average run time of a detector on a record over N runs**

    Parameters
    ----------
    record : array
        The raw physiological record.
    sampling_rate : int
        The sampling rate of the record in Hertz.
    detector : function
        A function that takes a physiological record as first positional argument as
        well as a ``sampling_rate`` keyword argument.
    n_runs : int
        The number of runs to average over.

    Returns
    -------
    avg_time
        The run time of the detector on the record averaged over n_runs, in milliseconds.
    """
    t_start = timer()
    for _ in range(n_runs):
        detector(record, sampling_rate=sampling_rate)
    elapsed = timer() - t_start

    # Mean per-run duration, converted from seconds to milliseconds
    return elapsed / n_runs * 1000
| 2,161 | 28.216216 | 80 | py |
NeuroKit | NeuroKit-master/neurokit2/benchmark/__init__.py | """Submodule for NeuroKit."""
from .benchmark_ecg import benchmark_ecg_preprocessing
# Public API of the benchmark submodule
__all__ = [
    "benchmark_ecg_preprocessing",
]
| 137 | 14.333333 | 54 | py |
NeuroKit | NeuroKit-master/neurokit2/data/write_csv.py | import numpy as np
def write_csv(data, filename, parts=None, **kwargs):
    """**Write data to one or multiple csv files**

    Split a DataFrame into several CSV files (``<filename>_part1.csv``, ...) when ``parts``
    is given, or write a single CSV otherwise. The parts can be re-assembled with::

        data_all = pd.concat(
            [pd.read_csv(f"data_part{i}.csv") for i in range(1, parts + 1)],
            axis=0,
        )

    Parameters
    ----------
    data : pd.DataFrame
        The data to write.
    filename : str
        Name of the CSV file. When splitting, ``_part<i>.csv`` is appended to it
        (so omit the extension); otherwise it is used as-is.
    parts : int
        Number of parts to split the data into. If ``None``, write a single file.
    **kwargs
        Additional arguments passed to ``pandas.DataFrame.to_csv()``.

    Returns
    -------
    None

    Example
    --------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      # Split data into multiple files
      # nk.write_csv(data, 'C:/Users/.../data', parts=6)

    """
    if isinstance(parts, int):
        # Assign consecutive row-blocks to parts. The grouping key is passed to groupby()
        # as an external array so the caller's DataFrame is not mutated (the previous
        # implementation left a "__Part__" column behind on the input).
        # int(): np.repeat rejects a float `repeats` (np.ceil returns a float).
        rows_per_part = int(np.ceil(len(data) / parts))
        part_ids = np.repeat(range(parts), rows_per_part)[: len(data)]
        for i, part in data.groupby(part_ids):
            part.to_csv(filename + f"_part{i + 1}.csv", **kwargs)
    else:
        data.to_csv(filename, **kwargs)
| 1,287 | 23.301887 | 93 | py |
NeuroKit | NeuroKit-master/neurokit2/data/read_bitalino.py | # -*- coding: utf-8 -*-
import json
import os
from warnings import warn
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning
def read_bitalino(filename):
    """**Read an OpenSignals file (from BITalino)**

    Reads and loads a BITalino file into a Pandas DataFrame.
    The function outputs both the dataframe and the information (such as the sampling rate)
    retrieved from the OpenSignals file.

    Parameters
    ----------
    filename : str
        Filename (with the extension) of an OpenSignals file (e.g., ``"data.txt"``).

    Returns
    ----------
    df : DataFrame, dict
        The BITalino file as a pandas dataframe if one device was read, or a dictionary
        of pandas dataframes (one dataframe per device) if multiple devices are read.
    info : dict
        The metadata information containing the sensors, corresponding channel names,
        sampling rate, and the events annotation timings if an
        ``"<name>_EventsAnnotation.txt"`` side file is found next to ``filename``.

    See Also
    --------
    .read_acqknowledge, .signal_resample

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # data, info = nk.read_bitalino("data.txt")
      # sampling_rate = info["sampling_rate"]

    """
    # Read metadata
    # -------------------------------------------------------------------------
    with open(filename, "r") as f:
        lines = f.readlines()
    if "OpenSignals" not in lines[0]:
        raise ValueError("Text file is not in OpenSignals format.")

    metadata = json.loads(lines[1][1:])  # metadata is JSON on the second line, after '#'
    # Strip ":" from the device keys (MAC addresses) so they match the annotation file
    metadata = {k.replace(":", ""): metadata[k] for k in metadata.keys()}

    # Try to find event annotations (optional side file, see _read_bitalino_annotations)
    # -------------------------------------------------------------------------
    annotations = _read_bitalino_annotations(filename)
    if annotations is not None:
        for k in annotations.keys():
            if k in metadata.keys():
                metadata[k]["Annotations"] = annotations[k]
            else:
                warn(
                    f"Device {k} not found in metadata ({metadata.keys()})."
                    + " Something might be wrong.",
                    category=NeuroKitWarning,
                )

    # Read data
    # -------------------------------------------------------------------------
    data = {k: None for k in metadata.keys()}
    raw = pd.read_csv(filename, sep="\t", header=None, comment="#")

    # Each device owns a contiguous slice of columns, in metadata order
    for i, k in enumerate(metadata.keys()):
        # Select the device's columns and rename labels to their sensor names
        ch = np.array(metadata[k]["column"])
        data[k] = raw.iloc[:, i * len(ch) : (i + 1) * len(ch)]
        for j, s in enumerate(metadata[k]["label"]):
            ch[ch == s] = metadata[k]["sensor"][j]
        data[k].columns = ch

        # Add a binary "Events" channel from the annotations (1 within annotated spans)
        if "Annotations" in metadata[k].keys():
            sr = metadata[k]["sampling rate"]
            data[k]["Events"] = 0
            annot = metadata[k]["Annotations"]
            annot = annot[annot["CHANNEL"].isin(metadata[k]["label"])]
            annot = annot.drop_duplicates(["START", "END"])
            for _, row in annot.iterrows():
                # NOTE(review): chained indexing may raise SettingWithCopyWarning;
                # consider `.loc` if the assignment ever stops propagating.
                data[k]["Events"][int(row["START"] * sr) : int(row["END"] * sr) + 1] = 1

        # Replace spaces in metadata keys (e.g. "sampling rate" -> "sampling_rate")
        metadata[k] = {x.replace(" ", "_"): v for x, v in metadata[k].items()}

    # If only one device is detected (loop ran once), unwrap the dicts
    if i == 0:
        data = data[k]
        metadata = metadata[k]

    return data, metadata
# =============================================================================
# Internals
# =============================================================================
def _read_bitalino_annotations(filename):
    """Read events that are annotated during BITalino signal acquisition.

    Looks for an "<name>_EventsAnnotation.txt" file next to ``filename`` and returns a
    dictionary of annotation DataFrames keyed by device MAC address (or ``None`` when no
    annotation file exists).
    """
    annot_file = filename.replace(".txt", "_EventsAnnotation.txt")
    if not os.path.isfile(annot_file):
        return None

    with open(annot_file, "r") as f:
        header = f.readlines()
    if "OpenSignals" not in header[0]:
        raise ValueError("Text file is not in OpenSignals format.")

    # Column labels are stored as JSON on the second line, after the leading '#'
    meta = json.loads(header[1][1:])

    table = pd.read_csv(annot_file, sep="\t", header=None, comment="#")
    table = table.dropna(axis=1, how="all")
    table.columns = meta["columns"]["labels"]

    # One sub-table per device, keyed by its MAC address
    return {mac: table[table["MAC"] == mac] for mac in table["MAC"].unique()}
| 4,610 | 34.469231 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/data/read_video.py | # -*- coding: utf-8 -*-
import os
import numpy as np
def read_video(filename="video.mp4"):
    """**Reads a video file into an array**

    Reads a video file (e.g., .mp4) into a numpy array. This function requires OpenCV to
    be installed via the ``opencv-python`` package.

    Parameters
    ----------
    filename : str
        The path of a video file.

    Returns
    -------
    array
        numpy array of shape (frame, RGB-channel, height, width).
    int
        Sampling rate in frames per second.

    Raises
    ------
    ImportError
        If OpenCV (``cv2``) is not installed.
    FileNotFoundError
        If ``filename`` does not point to an existing file.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # video, sampling_rate = nk.read_video("video.mp4")

    """
    # Try loading cv2
    try:
        import cv2
    except ImportError as e:
        # One concatenated string: multiple arguments would be stored as an args tuple.
        raise ImportError(
            "The 'cv2' module is required for this function to run. "
            "Please install it first (`pip install opencv-python`)."
        ) from e

    # Validate the path with a real exception: `assert` is stripped under `python -O`,
    # and the previous message never interpolated the filename.
    if not os.path.isfile(filename):
        raise FileNotFoundError(f"No file found with the specified name ({filename}).")

    capture = cv2.VideoCapture(filename)
    sampling_rate = int(capture.get(cv2.CAP_PROP_FPS))

    frames = []
    while capture.isOpened():
        success, frame = capture.read()  # OpenCV decodes frames in BGR order
        if not success:
            break
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # Convert to RGB
    capture.release()

    # Reorder axes from (frame, height, width, channel) to (frame, channel, height, width)
    return np.array(frames).swapaxes(3, 1).swapaxes(3, 2), sampling_rate
| 1,541 | 25.586207 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/data/data.py | # -*- coding: utf-8 -*-
import json
import os
import pickle
import urllib
import urllib.request

import pandas as pd
from sklearn import datasets as sklearn_datasets
def data(dataset="bio_eventrelated_100hz"):
    """**NeuroKit Datasets**

    NeuroKit includes datasets that can be used for testing. These datasets are not downloaded
    automatically with the package (to avoid increasing its weight), but can be downloaded via the
    ``nk.data()`` function (note that an internet connection is necessary). See the examples below.

    **Signals**: The following signals (that will return an array) are available:

    * **ecg_1000hz**: Returns a vector containing ECG signal (``sampling_rate=1000``).
    * **ecg_3000hz**: Returns a vector containing ECG signal (``sampling_rate=3000``).
    * **rsp_1000hz**: Returns a vector containing RSP signal (``sampling_rate=1000``).
    * **eeg_150hz**: Returns a vector containing EEG signal (``sampling_rate=150``).
    * **eog_100hz**: Returns a vector containing vEOG signal (``sampling_rate=100``).

    **DataFrames**: The following datasets (that will return a ``pd.DataFrame``) are available:

    * **iris**: Convenient access to the Iris dataset in a DataFrame, exactly how it is in R.
    * **eogs_200hz**: Returns a DataFrame with ``hEOG``, ``vEOG``.

      * Single subject
      * Visual and horizontal electrooculagraphy
      * ``sampling_rate=200``

    * **bio_resting_5min_100hz**: Returns a DataFrame with ``ECG``, ``PPG``, ``RSP``.

      * Single subject
      * Resting-state of 5 min (pre-cropped, with some ECG noise towards the end)
      * ``sampling_rate=100``

    * **bio_resting_8min_100hz**: Returns a DataFrame with ``ECG``, ``RSP``, ``EDA``,
      ``PhotoSensor``.

      * Single subject
      * Resting-state of 8 min when the photosensor is low (need to crop the data)
      * ``sampling_rate=100``

    * **bio_resting_8min_200hz**: Returns a dictionary with four subjects (``S01``, ``S02``,
      ``S03``, ``S04``).

      * Resting-state recordings
      * 8 min (``sampling_rate=200``)
      * Each subject is DataFrame with ``ECG``, ``RSP``, ``PhotoSensor``, ``Participant``

    * **bio_eventrelated_100hz**: Returns a DataFrame with ``ECG``, ``EDA``, ``Photosensor``,
      ``RSP``.

      * Single subject
      * Event-related recording of a participant watching 4 images for 3 seconds (the condition
        order was: ``["Negative", "Neutral", "Neutral", "Negative"]``)
      * ``sampling_rate=100``

    * **eeg_1min_200hz**: Returns an MNE raw object containing 1 min of EEG
      data (from the MNE-sample dataset).

    Parameters
    ----------
    dataset : str
        The name of the dataset.

    Returns
    -------
    DataFrame
        The data.

    Examples
    ---------
    **Single signals and vectors**

    .. ipython:: python

      import neurokit2 as nk

      ecg = nk.data(dataset="ecg_1000hz")
      @savefig p_datasets1.png scale=100%
      nk.signal_plot(ecg[0:10000], sampling_rate=1000)
      @suppress
      plt.close()

    .. ipython:: python

      rsp = nk.data(dataset="rsp_1000hz")
      @savefig p_datasets2.png scale=100%
      nk.signal_plot(rsp[0:20000], sampling_rate=1000)
      @suppress
      plt.close()

    .. ipython:: python

      eeg = nk.data("eeg_150hz")
      @savefig p_data3.png scale=100%
      nk.signal_plot(eeg, sampling_rate=150)
      @suppress
      plt.close()

    .. ipython:: python

      eog = nk.data("eog_100hz")
      @savefig p_data4.png scale=100%
      nk.signal_plot(eog[0:2000], sampling_rate=100)
      @suppress
      plt.close()

    **DataFrames**

    .. ipython:: python

      data = nk.data("iris")
      data.head()

    .. ipython:: python

      data = nk.data(dataset="eogs_200hz")
      @savefig p_datasets5.png scale=100%
      nk.signal_plot(data[0:4000], standardize=True, sampling_rate=200)
      @suppress
      plt.close()

    .. ipython:: python

      data = nk.data(dataset="bio_resting_5min_100hz")
      @savefig p_datasets6.png scale=100%
      nk.standardize(data).plot()
      @suppress
      plt.close()

    .. ipython:: python

      data = nk.data(dataset="bio_resting_8min_100hz")
      @savefig p_datasets7.png scale=100%
      nk.standardize(data).plot()
      @suppress
      plt.close()

    .. ipython:: python

      data = nk.data("bio_resting_8min_200hz")
      data.keys()
      data["S01"].head()

    .. ipython:: python

      data = nk.data("bio_eventrelated_100hz")
      @savefig p_data8.png scale=100%
      nk.standardize(data).plot()
      @suppress
      plt.close()

    .. ipython:: python

      raw = nk.data("eeg_1min_200hz")
      @savefig p_data9.png scale=100%
      nk.signal_plot(raw.get_data()[0:3, 0:2000], sampling_rate=200)
      @suppress
      plt.close()

    """
    # TODO: one could further improve this function with like
    # selectors 'ecg=True, eda=True, restingstate=True' that would
    # find the most appropriate dataset
    dataset = dataset.lower()

    # TODO: change this path back to "master"
    path = "https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/"

    # Signals as vectors =======================
    if dataset in ["eeg", "eeg_150hz", "eeg.txt"]:
        return pd.read_csv(path + "eeg.txt").values[:, 0]

    if dataset in ["rsp", "rsp_1000hz", "rsp_1000hz.txt"]:
        return pd.read_csv(path + "rsp_1000hz.txt", header=None).values[:, 0]

    if dataset in ["ecg", "ecg_1000hz", "ecg_1000hz.csv"]:
        return pd.read_csv(path + "ecg_1000hz.csv")["ECG"].values

    if dataset in ["ecg_3000hz", "ecg_3000hz.csv"]:
        # Fix: this branch previously read the 1000 Hz file, returning a signal
        # with the wrong sampling rate for the advertised dataset name
        return pd.read_csv(path + "ecg_3000hz.csv")["ECG"].values

    if dataset in ["eog", "veog", "eog_100hz", "eog_100hz.csv"]:
        return pd.read_csv(path + "eog_100hz.csv")["vEOG"].values

    # Dataframes ===============================
    if dataset == "iris":
        info = sklearn_datasets.load_iris()
        data = pd.DataFrame(
            info.data, columns=["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"]
        )
        data["Species"] = info.target_names[info.target]
        return data

    if dataset in ["eogs", "eogs_200hz", "eog_200hz", "eog_200hz.csv"]:
        return pd.read_csv(path + "eog_200hz.csv")

    # Add extension
    if dataset in ["bio_resting_8min_200hz"]:
        dataset += ".json"

    # Specific case for json file: one DataFrame per participant (rows of the JSON)
    if dataset.endswith(".json"):
        if "https" not in dataset:
            data = pd.read_json(path + dataset, orient="index")
        else:
            data = pd.read_json(dataset, orient="index")
        df = {}
        for participant, row in data.iterrows():
            for _, data_string in row.items():
                data_list = json.loads(data_string)
                data_pd = pd.DataFrame(data_list)
                df[participant] = data_pd
        return df

    # TODO: Add more EEG (fif and edf datasets)
    if dataset in ["eeg_1min_200hz"]:
        # NOTE: unpickling remote data is only safe because the URL is a fixed,
        # trusted NeuroKit repository file
        return pickle.load(
            urllib.request.urlopen(
                "https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true"
            )
        )

    # General case: fetch "<name>.csv" (or the given file / URL directly)
    file, ext = os.path.splitext(dataset)  # pylint: disable=unused-variable
    if ext == "":
        df = pd.read_csv(path + dataset + ".csv")
    else:
        if "https" not in dataset:
            df = pd.read_csv(path + dataset)
        else:
            df = pd.read_csv(dataset)
    return df
| 7,455 | 29.432653 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/data/read_acqknowledge.py | # -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from ..signal import signal_resample
def read_acqknowledge(
    filename, sampling_rate="max", resample_method="interpolation", impute_missing=True
):
    """**Read and format a BIOPAC's AcqKnowledge file into a pandas' dataframe**

    The function outputs both the dataframe and the sampling rate (retrieved from the
    AcqKnowledge file).

    Parameters
    ----------
    filename : str
        Filename (with or without the extension) of a BIOPAC's AcqKnowledge file (e.g., ``"data.acq"``).
    sampling_rate : int
        Sampling rate (in Hz, i.e., samples/second). Since an AcqKnowledge file can contain
        signals recorded at different rates, harmonization is necessary in order to convert it
        to a DataFrame. Thus, if `sampling_rate` is set to ``max`` (default), will keep the maximum
        recorded sampling rate and upsample the channels with lower rate if necessary (using the
        :func:`.signal_resample()` function). If the sampling rate is set to a given value, will
        resample the signals to the desired value. Note that the value of the sampling rate is
        outputted along with the data.
    resample_method : str
        Method of resampling (see :func:`.signal_resample()`).
    impute_missing : bool
        Sometimes, due to connections issues, there are lapses in the recorded signal (short
        periods without signal). If ``impute_missing`` is ``True``, will automatically fill the
        signal interruptions using padding.

    Returns
    ----------
    df : DataFrame
        The AcqKnowledge file as a pandas dataframe.
    sampling rate: int
        The sampling rate at which the data is sampled.

    Raises
    ------
    ImportError
        If the optional ``bioread`` dependency is not installed.
    ValueError
        If the file cannot be found.

    See Also
    --------
    .signal_resample

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # data, sampling_rate = nk.read_acqknowledge('file.acq')

    """
    # Try loading bioread (optional dependency)
    try:
        import bioread
    except ImportError:
        raise ImportError(
            "NeuroKit error: read_acqknowledge(): the 'bioread' module is required",
            " for this function to run. ",
            "Please install it first (`pip install bioread`).",
        )

    # Check filename
    if ".acq" not in filename:
        filename += ".acq"

    if os.path.exists(filename) is False:
        raise ValueError(
            "NeuroKit error: read_acqknowledge(): couldn't"
            " find the following file: " + filename
        )

    # Read file
    file = bioread.read(filename)

    # Get desired frequency: default to the fastest channel so no channel is downsampled
    if sampling_rate == "max":
        freq_list = []
        for channel in file.named_channels:
            freq_list.append(file.named_channels[channel].samples_per_second)
        sampling_rate = np.max(freq_list)

    # Loop through channels
    data = {}
    for channel in file.named_channels:
        signal = np.array(file.named_channels[channel].data)

        # Fill signal interruptions by forward-filling the last valid value.
        # `.ffill()` replaces the deprecated `fillna(method="pad")`.
        if impute_missing is True and np.isnan(np.sum(signal)):
            signal = pd.Series(signal).ffill().values

        # Resample if necessary
        if file.named_channels[channel].samples_per_second != sampling_rate:
            signal = signal_resample(
                signal,
                sampling_rate=file.named_channels[channel].samples_per_second,
                desired_sampling_rate=sampling_rate,
                method=resample_method,
            )
        data[channel] = signal

    # Sanitize lengths: resampling can leave channels a few samples short or long
    lengths = []
    for channel in data:
        lengths += [len(data[channel])]
    if len(set(lengths)) > 1:  # If different lengths
        length = pd.Series(lengths).mode()[0]  # Find most common (target length)
        for channel in data:
            if len(data[channel]) > length:
                data[channel] = data[channel][0:length]  # Trim the excess
            if len(data[channel]) < length:
                # Pad with the channel's last value
                data[channel] = np.concatenate(
                    [
                        data[channel],
                        np.full((length - len(data[channel])), data[channel][-1]),
                    ]
                )

    # Final dataframe
    df = pd.DataFrame(data)
    return df, sampling_rate
| 4,253 | 32.761905 | 104 | py |
NeuroKit | NeuroKit-master/neurokit2/data/__init__.py | """Submodule for NeuroKit."""
from .data import data
from .read_acqknowledge import read_acqknowledge
from .read_bitalino import read_bitalino
from .read_video import read_video
from .write_csv import write_csv
__all__ = ["read_acqknowledge", "read_bitalino", "read_video", "data", "write_csv"]
| 297 | 28.8 | 83 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/cluster_quality.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.spatial
import sklearn.cluster
import sklearn.metrics
import sklearn.mixture
import sklearn.model_selection
from ..misc import check_random_state
def cluster_quality(data, clustering, clusters=None, info=None, n_random=10, random_state=None, **kwargs):
    """**Assess Clustering Quality**

    Compute quality of the clustering using several metrics.

    Parameters
    ----------
    data : np.ndarray
        A matrix array of data (e.g., channels, sample points of M/EEG data)
    clustering : DataFrame
        Information about the distance of samples from their respective clusters, generated from
        :func:`.cluster`.
    clusters : np.ndarray
        Coordinates of cluster centers, which has a shape of n_clusters x n_features, generated
        from :func:`.cluster`.
    info : dict
        Information about the number of clusters, the function and model used for clustering,
        generated from :func:`.cluster`.
    n_random : int
        The number of random initializations to cluster random data for calculating the GAP
        statistic.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.
    **kwargs
        Other argument to be passed on, for instance ``GFP`` as ``'sd'`` in microstates.

    Returns
    -------
    individual : DataFrame
        Indices of cluster quality scores for each sample point.
    general : DataFrame
        Indices of cluster quality scores for all clusters.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Load the iris dataset
      data = nk.data("iris").drop("Species", axis=1)

      # Cluster
      clustering, clusters, info = nk.cluster(data, method="kmeans", n_clusters=3)

      # Compute indices of clustering quality
      individual, general = nk.cluster_quality(data, clustering, clusters, info)
      general

    References
    ----------
    * Tibshirani, R., Walther, G., & Hastie, T. (2001). Estimating the number of clusters in a
      data set via the gap statistic. Journal of the Royal Statistical Society: Series B
      (Statistical Methodology), 63(2), 411-423.
    * Mohajer, M., Englmeier, K. H., & Schmid, V. J. (2011). A comparison of Gap statistic
      definitions with and without logarithm function. arXiv preprint arXiv:1103.4767.

    """
    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)
    # Sanity checks: accept the full (clustering, clusters, info) tuple returned by cluster(),
    # and convert DataFrames to plain arrays
    if isinstance(clustering, tuple):
        clustering, clusters, info = clustering
    if isinstance(data, pd.DataFrame):
        data = data.values
    if isinstance(clustering, pd.DataFrame):
        clustering = clustering["Cluster"].values
    # Per-sample and whole-clustering metrics from scikit-learn
    individual, general = _cluster_quality_sklearn(data, clustering, clusters)
    # Individual distance from centroid (one column per cluster)
    distance = _cluster_quality_distance(data, clusters)
    distance = {"Clustering_Distance_" + str(i): distance[:, i] for i in range(distance.shape[1])}
    individual.update(distance)
    individual = pd.DataFrame(individual)
    # Variance explained (plus GEV and a cross-validation criterion)
    general["Score_VarianceExplained"] = _cluster_quality_variance(data, clusters, clustering)
    general["Score_GEV"], _ = _cluster_quality_gev(data, clusters, clustering, **kwargs)
    general["Score_CrossValidation"] = _cluster_quality_crossvalidation(data, clusters, clustering)
    # Dispersion
    general["Dispersion"] = _cluster_quality_dispersion(data, clustering, **kwargs)
    # Gap statistic
    general.update(_cluster_quality_gap(data, clusters, clustering, info, n_random=n_random, rng=rng))
    # Mixture models: extra information criteria when a GaussianMixture was fitted
    if "sklearn_model" in info:
        if isinstance(info["sklearn_model"], sklearn.mixture.GaussianMixture):
            general["Score_AIC"] = info["sklearn_model"].aic(data)
            general["Score_BIC"] = info["sklearn_model"].bic(data)
            general["Score_LogLikelihood"] = info["sklearn_model"].score(data)
            # NOTE(review): the result of this cross-validation is discarded — it looks like
            # it was meant to be stored in `general`; confirm whether this call (which costs
            # 10 model fits) is still needed.
            sklearn.model_selection.cross_val_score(info["sklearn_model"], data, cv=10)
    general = pd.DataFrame.from_dict(general, orient="index").T
    return individual, general
# =============================================================================
# Utils
# =============================================================================
def _cluster_quality_sklearn(data, clustering, clusters):
    """Compute scikit-learn clustering quality metrics.

    Returns a dict of per-sample scores (``individual``) and a dict of
    whole-clustering scores (``general``). With a single cluster, all
    scores are undefined and set to NaN.
    """
    n_clusters = len(clusters)
    single_cluster = n_clusters == 1

    # Per-sample silhouette coefficients
    if single_cluster:
        silhouettes = np.full(len(clustering), np.nan)
    else:
        silhouettes = sklearn.metrics.silhouette_samples(data, clustering)
    individual = {"Clustering_Silhouette": silhouettes}

    # Whole-clustering scores
    general = {"n_Clusters": n_clusters}
    if single_cluster:
        general.update(
            Score_Silhouette=np.nan, Score_Calinski=np.nan, Score_Bouldin=np.nan
        )
    else:
        general["Score_Silhouette"] = sklearn.metrics.silhouette_score(data, clustering)
        general["Score_Calinski"] = sklearn.metrics.calinski_harabasz_score(data, clustering)
        general["Score_Bouldin"] = sklearn.metrics.davies_bouldin_score(data, clustering)
    return individual, general
def _cluster_quality_distance(data, clusters, to_dataframe=False):
"""Distance between samples and clusters"""
distance = scipy.spatial.distance.cdist(data, clusters)
if to_dataframe is True:
distance = pd.DataFrame(distance).add_prefix("Distance_")
return distance
def _cluster_quality_sumsquares(data, clusters, clustering):
    """Within-cluster sum of squares.

    Sum over all samples of the squared distance between each sample and the
    center of the cluster it is assigned to.

    Parameters
    ----------
    data : np.ndarray
        Samples, shape (n_samples, n_features).
    clusters : np.ndarray
        Cluster centers, shape (n_clusters, n_features).
    clustering : array-like of int
        Cluster index of each sample, length n_samples.
    """
    distance = _cluster_quality_distance(data, clusters)
    # Vectorized selection of each sample's distance to its own cluster
    # (replaces the previous per-sample Python loop)
    own_distance = distance[np.arange(len(data)), clustering]
    return np.sum(own_distance**2)
def _cluster_quality_dispersion(data, clustering, n_clusters=4):
"""Sumsquares of the distances between samples within each clusters.
An error measure for a n_clusters cluster where the lower the better.
Can be used to compare and find the optimal number of clusters.
"""
dispersion_state = np.full(n_clusters, np.nan)
for i in range(n_clusters):
idx = clustering == i
data_state = data[idx, :]
state_size = len(data_state) # number of samples in this cluster
if state_size > 0:
# pair-wise distance between members of the same cluster
distance = scipy.spatial.distance.cdist(data_state, data_state)
# sumsquares of distances
dispersion_state[i] = 0.5 * np.nansum(distance**2) / state_size
else:
dispersion_state[i] = np.nan
dispersion = np.nansum(dispersion_state)
return dispersion
def _cluster_quality_variance(data, clusters, clustering):
    """Proportion of total variance explained by the clustering."""
    ss_within = _cluster_quality_sumsquares(data, clusters, clustering)
    # Total sum of squares, from all pairwise distances
    ss_total = np.sum(scipy.spatial.distance.pdist(data) ** 2) / data.shape[0]
    return (ss_total - ss_within) / ss_total
def _cluster_quality_gap(data, clusters, clustering, info, n_random=10, rng=None):
    """GAP statistic and modified GAP statistic by Mohajer (2011).

    The GAP statistic compares the total within intra-cluster variation for different values of k
    with their expected values under null reference distribution of the data.

    ``info`` must contain a ``"clustering_function"`` entry (as produced by ``cluster()``),
    which is re-run on every random reference dataset.

    NOTE(review): ``rng`` has no fallback — the default ``rng=None`` will fail at
    ``rng.uniform``; callers are expected to pass a seeded random generator.
    """
    # Observed within-cluster dispersion
    dispersion = _cluster_quality_sumsquares(data, clusters, clustering)
    # Per-feature bounds of the observed data, used to rescale the reference samples
    mins, maxs = np.min(data, axis=0), np.max(data, axis=0)
    dispersion_random = np.full(n_random, np.nan)
    for i in range(n_random):
        # Uniform random reference data of the same shape
        random_data = rng.uniform(size=data.shape)
        # Rescale the random data to the observed per-feature range
        m = (maxs - mins) / (np.max(random_data, axis=0) - np.min(random_data, axis=0))
        b = mins - (m * np.min(random_data, axis=0))
        random_data = np.array(m) * random_data + np.array(b)
        # Cluster the random data with the same clustering function
        # (note: `info` is rebound on each iteration with the latest run's output)
        _, random_clusters, info = info["clustering_function"](random_data)
        random_activation = random_clusters.dot(random_data.T)
        random_clustering = np.argmax(np.abs(random_activation), axis=0)
        dispersion_random[i] = _cluster_quality_sumsquares(
            random_data, random_clusters, random_clustering
        )
    # Compute GAP (log-scale comparison of reference vs observed dispersion)
    gap = np.mean(np.log(dispersion_random)) - np.log(dispersion)
    # Compute standard deviation of the reference dispersions (log scale)
    sd_k = np.sqrt(np.mean((np.log(dispersion_random) - np.mean(np.log(dispersion_random))) ** 2.0))
    s_k = np.sqrt(1.0 + 1.0 / n_random) * sd_k
    # Calculate Gap* statistic by Mohajer (2011): same idea without the log transform
    gap_star = np.mean(dispersion_random) - dispersion
    sd_k_star = np.sqrt(np.mean((dispersion_random - dispersion) ** 2.0))
    s_k_star = np.sqrt(1.0 + 1.0 / n_random) * sd_k_star
    out = {
        "Score_GAP": gap,
        "Score_GAPmod": gap_star,
        "Score_GAP_sk": s_k,
        "Score_GAPmod_sk": s_k_star,
    }
    return out
def _cluster_quality_crossvalidation(data, clusters, clustering):
"""Cross-validation index
The original code by https://github.com/Frederic-vW/eeg_microstates/blob/master/eeg_microstates.py#L600
leads to an error when the denominator is 0.
"""
n_rows, n_cols = data.shape # n_sample, n_channel
var = np.nansum(data**2) - np.nansum(np.nansum(clusters[clustering, :] * data, axis=1) ** 2)
var /= n_rows * (n_cols - 1)
denominator = (n_cols - len(clusters) - 1) ** 2
if np.abs(denominator) > 0:
cv = var * (n_cols - 1) ** 2 / denominator
else:
# warnings.warn(
# "Number of columns in data (" + str(n_cols) + ") is smaller "
# "than the number of cluster (" + str(len(clusters)) + ") plus 1. "
# "Returning the residual noise instead."
# )
cv = np.nan
# cv = var * (n_cols - 1)**2 / len(clusters)
return cv
# def _cluster_quality_gev(data, clusters, clustering, sd=None):
# if sd is None:
# sd = np.std(data, axis=1)
#
# # Normalize row-wise (across columns)
# clusters /= np.sqrt(np.sum(clusters**2, axis=1, keepdims=True))
# activation = np.dot(data, clusters.T)
# activation /= (data.shape[1] * np.outer(sd, np.std(clusters, axis=1)))
#
# gev = np.zeros(len(clusters))
# for k in range(len(clusters)):
# idx = (clustering == k)
# gev[k] = np.sum(sd[idx]**2 * activation[idx, k]**2)
# gev_total = np.sum(gev) / np.sum(sd ** 2)
# return gev_total
def _cluster_quality_gev(data, clusters, clustering, sd=None, n_clusters=4):
    """Global Variance Explained (GEV).

    Returns the total GEV and the per-cluster contributions. ``sd`` defaults
    to the per-sample standard deviation across features.
    """
    if sd is None:
        sd = np.nanstd(data, axis=1)
    # Correlation between each sample and the center of its assigned cluster
    map_corr = _correlate_vectors(data.T, clusters[clustering].T)

    denominator = np.nansum(sd**2)
    gev_all = np.zeros(n_clusters)
    for state in range(n_clusters):
        members = clustering == state
        gev_all[state] = np.nansum((sd[members] * map_corr[members]) ** 2) / denominator
    return np.nansum(gev_all), gev_all
def _correlate_vectors(A, B, axis=0):
"""Compute pairwise correlation of multiple pairs of vectors.
Fast way to compute correlation of multiple pairs of vectors without
computing all pairs as would with corr(A,B). Borrowed from Oli at Stack
overflow.
Note the resulting coefficients vary slightly from the ones
obtained from corr due differences in the order of the calculations.
(Differences are of a magnitude of 1e-9 to 1e-17 depending of the tested
data).
Parameters
----------
A : array
The first collection of vectors of shape (n, m)
B : array
The second collection of vectors of shape (n, m)
axis : int
The axis that contains the elements of each vector. Defaults to 0.
Returns
-------
corr : array
For each pair of vectors, the correlation between them with shape (m, )
"""
An = A - np.nanmean(A, axis=axis)
Bn = B - np.nanmean(B, axis=axis)
An /= np.linalg.norm(An, axis=axis)
Bn /= np.linalg.norm(Bn, axis=axis)
return np.nansum(An * Bn, axis=axis)
| 12,509 | 36.794562 | 107 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/density_bandwidth.py | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import scipy.stats
def density_bandwidth(x, method="KernSmooth", resolution=401):
    """**Bandwidth Selection for Density Estimation**

    Bandwidth selector for :func:`.density` estimation. See ``bw_method`` argument in
    :func:`.scipy.stats.gaussian_kde`.

    The ``"KernSmooth"`` method is adapted from the ``dpik()`` function from the *KernSmooth* R
    package. In this case, it estimates the optimal AMISE bandwidth using the direct plug-in method
    with 2 levels for the Parzen-Rosenblatt estimator with Gaussian kernel.

    Parameters
    -----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    method : float or str
        The bandwidth of the kernel. The larger the values, the smoother the estimation. Can be a
        number, or ``"scott"`` or ``"silverman"``
        (see ``bw_method`` argument in :func:`.scipy.stats.gaussian_kde`), or ``"KernSmooth"``.
    resolution : int
        Only when ``method="KernSmooth"``. The number of equally-spaced points over which binning
        is performed to obtain kernel functional approximation (see ``gridsize`` argument in
        ``KernSmooth::dpik()``).

    Returns
    -------
    float
        Bandwidth value.

    See Also
    --------
    density

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      x = np.random.normal(0, 1, size=100)
      bw = nk.density_bandwidth(x)
      bw

      nk.density_bandwidth(x, method="scott")
      nk.density_bandwidth(x, method=1)

      @savefig p_density_bandwidth.png scale=100%
      x, y = nk.density(x, bandwidth=bw, show=True)
      @suppress
      plt.close()

    References
    ----------
    * Jones, W. M. (1995). Kernel Smoothing, Chapman & Hall.

    """
    if isinstance(method, str):
        method = method.lower()

    # Numeric bandwidths and "scott"/"silverman" are delegated to scipy
    if isinstance(method, (float, int)) or method != "kernsmooth":
        return scipy.stats.gaussian_kde(x, bw_method=method).factor

    # Direct plug-in method (adapted from KernSmooth::dpik) ------------------
    n = len(x)

    # Scale estimate: the smaller of the SD and the standardized IQR (robust to outliers)
    stdev = np.nanstd(x, ddof=1)
    iqr = np.diff(np.percentile(x, [25, 75]))[0] / 1.349
    scalest = min(stdev, iqr)

    # Standardize the data before binning
    data_scaled = (x - np.nanmean(x)) / scalest
    min_scaled = np.nanmin(data_scaled)
    max_scaled = np.nanmax(data_scaled)
    gcounts = _density_linearbinning(
        x=data_scaled,
        gpoints=np.linspace(min_scaled, max_scaled, resolution),
        truncate=True,
    )

    # Two-stage plug-in: estimate psi6, use it to estimate psi4, then the
    # AMISE-optimal bandwidth
    alpha = (2 * np.sqrt(2) ** 9 / (7 * n)) ** (1 / 9)
    psi6hat = _density_bkfe(gcounts, 6, alpha, min_scaled, max_scaled)
    alpha = (-3 * np.sqrt(2 / np.pi) / (psi6hat * n)) ** (1 / 7)
    psi4hat = _density_bkfe(gcounts, 4, alpha, min_scaled, max_scaled)

    delta_0 = 1 / ((4 * np.pi) ** (1 / 10))
    output = scalest * delta_0 * (1 / (psi4hat * n)) ** (1 / 5)
    return output
def _density_linearbinning(x, gpoints, truncate=True):
"""
Linear binning. Adapted from KernSmooth R package.
"""
n = len(x)
M = gpoints.shape[0]
a = gpoints[0]
b = gpoints[-1]
# initialization of gcounts:
gcounts = np.zeros(M)
Delta = (b - a) / (M - 1)
for i in range(n):
lxi = ((x[i] - a) / Delta) + 1
li = int(lxi)
rem = lxi - li
if (li >= 1) and (li < M):
gcounts[li - 1] = gcounts[li - 1] + 1 - rem
gcounts[li] = gcounts[li] + rem
elif (li < 1) and (truncate is False):
gcounts[0] = gcounts[0] + 1
elif (li >= M) and (truncate is False):
gcounts[M - 1] = gcounts[M - 1] + 1
return gcounts
def _density_bkfe(gcounts, drv, h, a, b):
    """
    'bkfe' function adapted from KernSmooth R package.

    Binned kernel functional estimate: estimates the density functional of
    order ``drv`` from the binned counts ``gcounts`` on the grid [a, b], using
    a Gaussian kernel with bandwidth ``h``. The kernel-weights/counts
    convolution is computed via FFT.
    """
    resol = len(gcounts)

    # Set the sample size and bin width
    n = np.nansum(gcounts)
    delta = (b - a) / (resol - 1)

    # Obtain kernel weights: only the first L grid offsets carry non-negligible mass
    tau = drv + 4
    L = min(int(tau * h / delta), resol)
    if L == 0:
        warnings.warn(
            "WARNING : Binning grid too coarse for current (small) bandwidth: consider increasing 'resolution'"
        )
    lvec = np.arange(L + 1)
    arg = lvec * delta / h
    # Standard normal density at the grid offsets, scaled for the drv-th derivative
    dnorm = np.exp(-np.square(arg) / 2) / np.sqrt(2 * np.pi)
    kappam = dnorm / h ** (drv + 1)
    hmold0 = 1
    hmold1 = arg
    hmnew = 1
    if drv >= 2:
        for i in np.arange(2, drv + 1):
            hmnew = arg * hmold1 - (i - 1) * hmold0
            hmold0 = hmold1  # Compute mth degree Hermite polynomial
            hmold1 = hmnew  # by recurrence.
        kappam = hmnew * kappam

    # Now combine weights and counts to obtain estimate:
    # zero-pad both sequences to a power of two and convolve via FFT
    P = 2 ** (int(np.log(resol + L + 1) / np.log(2)) + 1)
    kappam = np.concatenate((kappam, np.zeros(P - 2 * L - 1), kappam[1:][::-1]), axis=0)
    Gcounts = np.concatenate((gcounts, np.zeros(P - resol)), axis=0)
    kappam = np.fft.fft(kappam)
    Gcounts = np.fft.fft(Gcounts)
    gcounter = gcounts * (np.real(np.fft.ifft(kappam * Gcounts)))[0:resol]

    return np.nansum(gcounter) / n**2
| 4,949 | 28.464286 | 117 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/hdi.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from ..misc import find_closest
from .density import density
def hdi(x, ci=0.95, show=False, **kwargs):
    """**Highest Density Interval (HDI)**

    Compute the Highest Density Interval (HDI) of a distribution. All points within this interval
    have a higher probability density than points outside the interval. The HDI can be used in the
    context of uncertainty characterisation of posterior distributions (in the Bayesian framework)
    as Credible Interval (CI). Unlike equal-tailed intervals that typically exclude 2.5% from each
    tail of the distribution and always include the median, the HDI is not equal-tailed and
    therefore always includes the mode(s) of posterior distributions.

    Parameters
    ----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    ci : float
        Value of probability of the (credible) interval - CI (between 0 and 1) to be estimated.
        Default to .95 (95%).
    show : bool
        If ``True``, the function will produce a figure.
    **kwargs : Line2D properties
        Other arguments to be passed to :func:`nk.density`.

    See Also
    --------
    density

    Returns
    ----------
    float(s)
        The HDI low and high limits.
    fig
        Distribution plot.

    Examples
    ----------
    .. ipython:: python

      import numpy as np
      import neurokit2 as nk

      x = np.random.normal(loc=0, scale=1, size=100000)

      @savefig p_hdi1.png scale=100%
      ci_min, ci_high = nk.hdi(x, ci=0.95, show=True)
      @suppress
      plt.close()

    """
    x_sorted = np.sort(x)
    window_size = np.ceil(ci * len(x_sorted)).astype("int")

    if window_size < 2:
        raise ValueError(
            "NeuroKit error: hdi(): `ci` is too small or x does not contain enough data points."
        )

    # Width of every candidate interval containing `window_size` consecutive sorted
    # points (vectorized equivalent of the original per-index Python loop)
    n_intervals = len(x_sorted) - window_size
    widths = x_sorted[window_size:] - x_sorted[:n_intervals]

    # The HDI is the narrowest such interval; ties resolve to the first occurrence,
    # matching the previous `list.index(min(...))` behavior
    best = int(np.argmin(widths))
    hdi_low = x_sorted[best]
    hdi_high = x_sorted[best + window_size]

    if show is True:
        # Fix: forward `ci` so the plot label reflects the requested interval
        # (previously the label always displayed the default 95%)
        _hdi_plot(x, hdi_low, hdi_high, ci=ci, **kwargs)

    return hdi_low, hdi_high
def _hdi_plot(vals, hdi_low, hdi_high, ci=0.95, **kwargs):
    """Plot the density of `vals` with the HDI region highlighted.

    The tails (outside [hdi_low, hdi_high]) are shaded pink and the interval
    itself blue; `ci` is only used for the legend label.
    """
    x, y = density(vals, show=False, **kwargs)

    # Mask of the density points that fall OUTSIDE the interval (the tails)
    outside = np.full(len(x), False)
    outside[: find_closest(hdi_low, x, return_index=True)] = True
    outside[find_closest(hdi_high, x, return_index=True) :] = True

    fig, ax = plt.subplots()  # pylint: disable=unused-variable
    ax.plot(x, y, color="white")
    label = "CI {:.0%} [{:.2f}, {:.2f}]".format(ci, hdi_low, hdi_high)
    ax.fill_between(x, y, where=outside, color="#E91E63", label=label)
    ax.fill_between(x, y, where=~outside, color="#2196F3")
    ax.legend(loc="upper right")
| 2,905 | 29.270833 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/correlation.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
def cor(x, y, method="pearson", show=False):
    """**Correlation**

    Compute the correlation coefficient between two vectors.

    Parameters
    -----------
    x : Union[list, np.array, pd.Series]
        Vectors of values.
    y : Union[list, np.array, pd.Series]
        Vectors of values.
    method : str
        Correlation method. Can be one of ``"pearson"``, ``"spearman"``, ``"kendall"``.
    show : bool
        Draw a scatterplot with a regression line.

    Returns
    -------
    r
        The correlation coefficient.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      x = [1, 2, 3, 4, 5]
      y = [3, 1, 5, 6, 6]

      @savefig p_cor1.png scale=100%
      corr = nk.cor(x, y, method="pearson", show=True)
      @suppress
      plt.close()

      corr

    """
    # The p-value is computed alongside r but not currently returned
    # (interface kept stable for existing callers)
    r, p = _cor_methods(x, y, method)  # pylint: disable=unused-variable

    if show is True:
        _cor_plot(x, y)
    return r
# =============================================================================
# Internals
# =============================================================================
def _cor_methods(x, y, method="pearson"):
method = method.lower()
if method in ["pearson", "pears", "p", "r"]:
r, p = scipy.stats.pearsonr(x, y)
elif method in ["spearman", "spear", "s", "rho"]:
r, p = scipy.stats.spearmanr(x, y, nan_policy="omit")
elif method in ["kendall", "kend", "k", "tau"]:
r, p = scipy.stats.kendalltau(x, y, nan_policy="omit")
else:
raise ValueError("NeuroKit error: cor(): 'method' not recognized.")
return r, p
def _cor_plot(x, y):
    """Scatterplot of x against y with a least-squares regression line."""
    # Raw observations
    plt.plot(x, y, "o")
    # First-degree polynomial fit = simple linear regression
    slope, intercept = np.polyfit(x, y, 1)
    xs = np.array(x)
    plt.plot(xs, slope * xs + intercept)
| 1,874 | 23.671053 | 87 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/density.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.stats
from .density_bandwidth import density_bandwidth
def density(x, desired_length=100, bandwidth="Scott", show=False, **kwargs):
    """**Density estimation**

    Computes kernel density estimates.

    Parameters
    -----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    desired_length : int
        The amount of values in the returned density estimation.
    bandwidth : float or str
        Passed to the ``method`` argument from the :func:`.density_bandwidth` function.
    show : bool
        Display the density plot.
    **kwargs
        Additional arguments to be passed to :func:`.density_bandwidth`.

    Returns
    -------
    x
        The x axis of the density estimation.
    y
        The y axis of the density estimation.

    See Also
    --------
    density_bandwidth

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.ecg_simulate(duration=20)

      @savefig p_density1.png scale=100%
      x, y = nk.density(signal, bandwidth=0.5, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      # Bandwidth comparison
      _, y2 = nk.density(signal, bandwidth=1)
      _, y3 = nk.density(signal, bandwidth=2)
      _, y4 = nk.density(signal, bandwidth="scott")
      _, y5 = nk.density(signal, bandwidth="silverman")
      _, y6 = nk.density(signal, bandwidth="kernsmooth")

      @savefig p_density2.png scale=100%
      nk.signal_plot([y, y2, y3, y4, y5, y6],
                     labels=["0.5", "1", "2", "Scott", "Silverman", "KernSmooth"])
      @suppress
      plt.close()

    """
    # `bandwidth` supersedes any stray `method` kwarg, which is otherwise
    # forwarded to density_bandwidth and would clash with its `method` argument
    kwargs.pop("method", None)
    bw = density_bandwidth(x, method=bandwidth, **kwargs)

    density_function = scipy.stats.gaussian_kde(x, bw_method=bw)

    # Evaluate the estimated density on an evenly-spaced grid over the data range
    x = np.linspace(np.nanmin(x), np.nanmax(x), num=desired_length)
    y = density_function(x)

    if show is True:
        pd.DataFrame({"x": x, "y": y}).plot(x="x")

    return x, y
| 2,045 | 24.898734 | 87 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/cluster.py | # -*- coding: utf-8 -*-
import functools
import warnings
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.spatial
import sklearn.cluster
import sklearn.decomposition
import sklearn.mixture
from ..misc import check_random_state
from .cluster_quality import _cluster_quality_distance
def cluster(data, method="kmeans", n_clusters=2, random_state=None, optimize=False, **kwargs):
    """**Data Clustering**

    Performs clustering of data using different algorithms.

    * **kmod**: Modified k-means algorithm.
    * **kmeans**: Normal k-means.
    * **kmedoids**: k-medoids clustering, a more stable version of k-means.
    * **pca**: Principal Component Analysis.
    * **ica**: Independent Component Analysis.
    * **aahc**: Atomize and Agglomerate Hierarchical Clustering. Computationally heavy.
    * **hierarchical**
    * **spectral**
    * **mixture**
    * **mixturebayesian**

    See ``sklearn`` for methods details.

    Parameters
    ----------
    data : np.ndarray
        Matrix array of data (E.g., an array (channels, times) of M/EEG data).
    method : str
        The algorithm for clustering. Can be one of ``"kmeans"`` (default), ``"kmod"``,
        ``"kmedoids"``, ``"pca"``, ``"ica"``, ``"aahc"``, ``"hierarchical"``, ``"spectral"``,
        ``"mixture"``, ``"mixturebayesian"``.
    n_clusters : int
        The desired number of clusters.
    random_state : Union[int, numpy.random.RandomState]
        The ``RandomState`` for the random number generator. Defaults to ``None``, in which case a
        different random state is chosen each time this function is called.
    optimize : bool
        Optimized method in Poulsen et al. (2018) for the *k*-means modified method.
    **kwargs
        Other arguments to be passed into ``sklearn`` functions.

    Returns
    -------
    clustering : DataFrame
        Information about the distance of samples from their respective clusters.
    clusters : np.ndarray
        Coordinates of cluster centers, which has a shape of n_clusters x n_features.
    info : dict
        Information about the number of clusters, the function and model used for clustering.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import matplotlib.pyplot as plt

      # Load the iris dataset
      data = nk.data("iris").drop("Species", axis=1)

      # Cluster using different methods
      clustering_kmeans, clusters_kmeans, info = nk.cluster(data, method="kmeans", n_clusters=3)
      clustering_spectral, clusters_spectral, info = nk.cluster(data, method="spectral", n_clusters=3)
      clustering_hierarchical, clusters_hierarchical, info = nk.cluster(data, method="hierarchical", n_clusters=3)
      clustering_agglomerative, clusters_agglomerative, info= nk.cluster(data, method="agglomerative", n_clusters=3)
      clustering_mixture, clusters_mixture, info = nk.cluster(data, method="mixture", n_clusters=3)
      clustering_bayes, clusters_bayes, info = nk.cluster(data, method="mixturebayesian", n_clusters=3)
      clustering_pca, clusters_pca, info = nk.cluster(data, method="pca", n_clusters=3)
      clustering_ica, clusters_ica, info = nk.cluster(data, method="ica", n_clusters=3)
      clustering_kmod, clusters_kmod, info = nk.cluster(data, method="kmod", n_clusters=3)
      clustering_kmedoids, clusters_kmedoids, info = nk.cluster(data, method="kmedoids", n_clusters=3)
      clustering_aahc, clusters_aahc, info = nk.cluster(data, method='aahc_frederic', n_clusters=3)

      # Visualize classification and 'average cluster'
      @savefig p_cluster1.png scale=100%
      fig, axes = plt.subplots(ncols=2, nrows=5)
      axes[0, 0].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_kmeans['Cluster'])
      axes[0, 0].scatter(clusters_kmeans[:, 2], clusters_kmeans[:, 3], c='red')
      axes[0, 0].set_title("k-means")
      axes[0, 1].scatter(data.iloc[:,[2]], data.iloc[:, [3]], c=clustering_spectral['Cluster'])
      axes[0, 1].scatter(clusters_spectral[:, 2], clusters_spectral[:, 3], c='red')
      axes[0, 1].set_title("Spectral")
      axes[1, 0].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_hierarchical['Cluster'])
      axes[1, 0].scatter(clusters_hierarchical[:, 2], clusters_hierarchical[:, 3], c='red')
      axes[1, 0].set_title("Hierarchical")
      axes[1, 1].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_agglomerative['Cluster'])
      axes[1, 1].scatter(clusters_agglomerative[:, 2], clusters_agglomerative[:, 3], c='red')
      axes[1, 1].set_title("Agglomerative")
      axes[2, 0].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_mixture['Cluster'])
      axes[2, 0].scatter(clusters_mixture[:, 2], clusters_mixture[:, 3], c='red')
      axes[2, 0].set_title("Mixture")
      axes[2, 1].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_bayes['Cluster'])
      axes[2, 1].scatter(clusters_bayes[:, 2], clusters_bayes[:, 3], c='red')
      axes[2, 1].set_title("Bayesian Mixture")
      axes[3, 0].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_pca['Cluster'])
      axes[3, 0].scatter(clusters_pca[:, 2], clusters_pca[:, 3], c='red')
      axes[3, 0].set_title("PCA")
      axes[3, 1].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_ica['Cluster'])
      axes[3, 1].scatter(clusters_ica[:, 2], clusters_ica[:, 3], c='red')
      axes[3, 1].set_title("ICA")
      axes[4, 0].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_kmod['Cluster'])
      axes[4, 0].scatter(clusters_kmod[:, 2], clusters_kmod[:, 3], c='red')
      axes[4, 0].set_title("modified K-means")
      axes[4, 1].scatter(data.iloc[:,[2]], data.iloc[:,[3]], c=clustering_aahc['Cluster'])
      axes[4, 1].scatter(clusters_aahc[:, 2], clusters_aahc[:, 3], c='red')
      axes[4, 1].set_title("AAHC (Frederic's method)")
      @suppress
      plt.close()

    References
    -----------
    * Park, H. S., & Jun, C. H. (2009). A simple and fast algorithm for K-medoids
      clustering. Expert systems with applications, 36(2), 3336-3341.

    """
    # Sanity checks
    if isinstance(data, pd.DataFrame):
        data = data.values

    method = method.lower()

    # K-means
    if method in ["kmeans", "k", "k-means", "kmean"]:
        out = _cluster_kmeans(data, n_clusters=n_clusters, random_state=random_state, **kwargs)

    # Modified k-means
    elif method in ["kmods", "kmod", "kmeans modified", "modified kmeans"]:
        out = _cluster_kmod(
            data, n_clusters=n_clusters, random_state=random_state, optimize=optimize, **kwargs
        )
    # K-medoids
    elif method in ["kmedoids", "k-medoids", "k-centers"]:
        out = _cluster_kmedoids(data, n_clusters=n_clusters, random_state=random_state, **kwargs)

    # PCA
    elif method in ["pca", "principal", "principal component analysis"]:
        out = _cluster_pca(data, n_clusters=n_clusters, random_state=random_state, **kwargs)

    # ICA
    elif method in ["ica", "independent", "independent component analysis"]:
        # Bug fix: this branch previously dispatched to _cluster_pca(), so
        # requesting ICA silently returned a PCA solution.
        out = _cluster_ica(data, n_clusters=n_clusters, random_state=random_state, **kwargs)

    # Mixture
    elif method in ["mixture", "mixt"]:
        out = _cluster_mixture(
            data, n_clusters=n_clusters, bayesian=False, random_state=random_state, **kwargs
        )

    # Frederic's AAHC
    elif method in ["aahc_frederic", "aahc_eegmicrostates"]:
        out = _cluster_aahc(data, n_clusters=n_clusters, random_state=random_state, **kwargs)

    # Bayesian
    elif method in ["bayesianmixture", "bayesmixt", "mixturebayesian", "mixturebayes"]:
        out = _cluster_mixture(
            data, n_clusters=n_clusters, bayesian=True, random_state=random_state, **kwargs
        )

    # Others (spectral, hierarchical, agglomerative, ...)
    else:
        # Bug fix: ``method`` was previously dropped here, so every fallthrough
        # method silently ran the default spectral clustering.
        out = _cluster_sklearn(data, method=method, n_clusters=n_clusters, **kwargs)

    return out
# =============================================================================
# =============================================================================
# # Methods
# =============================================================================
# =============================================================================
# =============================================================================
# Kmeans
# =============================================================================
def _cluster_kmeans(data, n_clusters=2, random_state=None, n_init="auto", **kwargs):
    """K-means clustering algorithm.

    Thin wrapper around ``sklearn.cluster.KMeans`` returning, for each sample,
    its distance to every cluster center plus the assigned cluster label.
    """
    # Initialize clustering function.
    # NOTE(review): n_init="auto" is only accepted by recent scikit-learn
    # releases — confirm the project's minimum sklearn version supports it.
    clustering_model = sklearn.cluster.KMeans(
        n_clusters=n_clusters, random_state=random_state, n_init=n_init, **kwargs
    )

    # Fit the model and obtain per-sample labels in a single pass.
    clustering = clustering_model.fit_predict(data)

    # Get representatives (identical to _cluster_getclusters(),
    # but why recompute when already included)
    clusters = clustering_model.cluster_centers_

    # Distance of every sample to every cluster center, as a DataFrame.
    prediction = _cluster_quality_distance(data, clusters, to_dataframe=True)
    prediction["Cluster"] = clustering

    # Copy function with given parameters, so callers can re-run the exact
    # same clustering (e.g. on new data) via info["clustering_function"].
    clustering_function = functools.partial(
        _cluster_kmeans, n_clusters=n_clusters, random_state=random_state, n_init=n_init, **kwargs
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "sklearn_model": clustering_model,
        "random_state": random_state,
    }

    return prediction, clusters, info
# =============================================================================
# K-medoids
# =============================================================================
def _cluster_kmedoids(data, n_clusters=2, max_iterations=1000, random_state=None, **kwargs):
    """Performs k-medoids clustering which is based on the most centrally located object in a cluster.
    Less sensitive to outliers than K-means clustering.

    Adapted from https://github.com/rakeshvar/kmedoids/. Original proposed algorithm from Park & Jun (2009).
    """
    # Sanitize
    if isinstance(data, pd.DataFrame):
        data = np.array(data)
    n_samples = data.shape[0]

    # Step 1: Initialize random medoids
    rng = check_random_state(random_state)
    ids_of_medoids = rng.choice(n_samples, n_clusters, replace=False)

    # Distance between objects and their medoids; can be euclidean or manhatten.
    def find_distance(x, y, dist_method="euclidean"):
        if dist_method == "euclidean":
            return np.sqrt(np.sum(np.square(x - y), axis=-1))
        elif dist_method == "manhatten":
            return np.sum(np.abs(x - y), axis=-1)

    individual_points = data[:, None, :]
    medoid_points = data[None, ids_of_medoids, :]
    distance = find_distance(individual_points, medoid_points)

    # Assign each point to the nearest medoid
    segmentation = np.argmin(distance, axis=1)

    # Step 2: Update medoids, iterating until the assignments stabilize.
    for _ in range(max_iterations):
        # Find new medoids: for each cluster, the member minimizing the total
        # distance to all other members.
        # (Fix: the inner loop previously reused the outer loop variable ``i``.)
        ids_of_medoids = np.full(n_clusters, -1, dtype=int)
        for k in range(n_clusters):
            # NOTE(review): if a cluster ever becomes empty, ``indices`` is
            # empty and argmin raises — presumably rare; confirm upstream.
            indices = np.where(segmentation == k)[0]
            distances = find_distance(data[indices, None, :], data[None, indices, :]).sum(axis=0)
            ids_of_medoids[k] = indices[np.argmin(distances)]

        # Step 3: Reassign objects to medoids
        new_distances = find_distance(data[:, None, :], data[None, ids_of_medoids, :])
        new_assignments = np.argmin(new_distances, axis=1)

        # Fraction of points whose assignment changed this iteration.
        diffs = np.mean(new_assignments != segmentation)
        segmentation = new_assignments

        # Converged once at most 1% of the assignments changed; otherwise go
        # back to step 2. (Comment fixed: the old one claimed a 10% threshold,
        # contradicting the code.)
        if diffs <= 0.01:
            break

    # Data points as centroids
    clusters = data[ids_of_medoids]

    # Get prediction
    prediction = _cluster_quality_distance(data, clusters, to_dataframe=True)
    prediction["Cluster"] = segmentation

    # Copy function with given parameters
    clustering_function = functools.partial(
        _cluster_kmedoids,
        n_clusters=n_clusters,
        max_iterations=max_iterations,
        random_state=random_state,
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "random_state": random_state,
        "clusters": clusters,
    }

    return prediction, clusters, info
# =============================================================================
# Modified K-means
# =============================================================================
def _cluster_kmod(
    data,
    n_clusters=4,
    max_iterations=1000,
    threshold=1e-6,
    random_state=None,
    optimize=False,
    **kwargs
):
    """The modified K-means clustering algorithm,

    adapted from Marijn van Vliet and Frederic von Wegner.

    https://github.com/wmvanvliet/mne_microstates
    https://github.com/Frederic-vW/eeg_microstates

    Parameters
    -----------
    n_clusters : int
        The number of unique microstates to find. Defaults to 4.
    max_iterations : int
        The maximum number of iterations to perform in the k-means algorithm.
        Defaults to 1000.
    threshold : float
        The threshold of convergence for the k-means algorithm, based on
        relative change in noise variance. Defaults to 1e-6.
    random_state : Union[int, numpy.random.RandomState, None]
        The seed or ``RandomState`` for the random number generator. Defaults
        to ``None``, in which case a different seed is chosen each time this
        function is called.
    optimize : bool
        To use a new optimized method in https://www.biorxiv.org/content/10.1101/289850v1.full.pdf.
        For the Kmeans modified method. Default to False.
    **kwargs
        Other arguments to be passed into ``sklearn`` functions.

    Returns
    -------
    clustering : DataFrame
        Information about the distance of samples from their respective clusters.
    clusters : np.ndarray
        Coordinates of cluster centers, which has a shape of n_clusters x n_features.
    info : dict
        Information about the number of clusters, the function and model used for clustering.

    """
    n_samples, n_channels = data.shape

    # Cache this value for later to compute residual
    data_sum_sq = np.sum(data**2)

    # Select random timepoints for our initial topographic maps
    rng = check_random_state(random_state)
    init_times = rng.choice(n_samples, size=n_clusters, replace=False)

    # Initialize random cluster centroids
    clusters = data[init_times, :]

    # Normalize row-wise (across EEG channels)
    clusters /= np.linalg.norm(clusters, axis=1, keepdims=True)  # Normalize the maps

    # Initialize iteration
    prev_residual = 0
    for i in range(max_iterations):
        # Step 3: Assign each sample to the best matching microstate
        activation = clusters.dot(data.T)
        segmentation = np.argmax(np.abs(activation), axis=0)

        # Step 4: Recompute the topographic maps of the microstates, based on the
        # samples that were assigned to each state.
        for state in np.arange(n_clusters):
            # Get data for the specific state
            idx = segmentation == state
            data_state = data[idx, :]

            # Sanity check: a state with no assigned samples gets a null map.
            if np.sum(idx) == 0:
                clusters[state] = 0
                continue

            # Retrieve map values
            if optimize:
                # Method 2 - optimized segmentation
                state_vals = data_state.T.dot(activation[state, idx])
            else:
                # Method 1 - eigen value
                # step 4a
                Sk = np.dot(data_state.T, data_state)
                # step 4b
                eigen_vals, eigen_vectors = scipy.linalg.eigh(Sk)
                state_vals = eigen_vectors[:, np.argmax(np.abs(eigen_vals))]

            state_vals /= np.linalg.norm(state_vals)  # Normalize Map
            clusters[state, :] = state_vals  # Store map

        # Estimate residual noise (step 5)
        act_sum_sq = np.sum(np.sum(clusters[segmentation, :] * data, axis=1) ** 2)
        residual = np.abs(data_sum_sq - act_sum_sq)
        residual = residual / float(n_samples * (n_channels - 1))

        # Have we converged? Convergence criterion: variance estimate (step 6)
        if np.abs(prev_residual - residual) < (threshold * residual):
            break

        # Next iteration
        prev_residual = residual.copy()
    else:
        # Bug fix: the original tested ``if i == max_iterations`` after the loop,
        # which can never be true (the index tops out at max_iterations - 1), and
        # passed a second string as warnings.warn()'s ``category`` argument, which
        # would have raised a TypeError. The for/else fires exactly when the loop
        # finished without a break, i.e. without converging.
        warnings.warn(
            "Modified K-means algorithm failed to converge after "
            + str(max_iterations)
            + " iterations. Consider increasing 'max_iterations'."
        )

    # De-normalize
    clusters_unnormalized = _cluster_getclusters(data, segmentation)
    prediction = _cluster_quality_distance(data, clusters_unnormalized, to_dataframe=True)
    prediction["Cluster"] = segmentation

    # Copy function with given parameters
    clustering_function = functools.partial(
        _cluster_kmod,
        n_clusters=n_clusters,
        max_iterations=max_iterations,
        threshold=threshold,
        random_state=random_state,
        **kwargs
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "random_state": random_state,
        "clusters_normalized": clusters,
        "residual": residual,
    }

    return prediction, clusters_unnormalized, info
# =============================================================================
# PCA
# =============================================================================
def _cluster_pca(data, n_clusters=2, random_state=None, **kwargs):
    """Principal Component Analysis (PCA) for clustering.

    Each sample is assigned to the component on which it has the largest
    absolute loading; cluster centers are the mean of each group's samples.
    """
    # Fit PCA with one component per desired cluster.
    pca = sklearn.decomposition.PCA(
        n_components=n_clusters,
        copy=True,
        whiten=True,
        svd_solver="auto",
        random_state=random_state,
        **kwargs
    )
    pca = pca.fit(data)

    # Project the data onto the components; columns become "Loading_0", ...
    prediction = pca.transform(data)
    prediction = pd.DataFrame(prediction).add_prefix("Loading_")
    # Hard assignment: the column with the max absolute loading, then mapped
    # from column name to its integer position.
    prediction["Cluster"] = prediction.abs().idxmax(axis=1).values
    prediction["Cluster"] = [
        np.where(prediction.columns == state)[0][0] for state in prediction["Cluster"]
    ]

    # Recover states from clustering (mean of each group's samples).
    clusters = _cluster_getclusters(data, prediction["Cluster"])

    # Copy function with given parameters so the clustering can be re-run identically.
    clustering_function = functools.partial(
        _cluster_pca, n_clusters=n_clusters, random_state=random_state, **kwargs
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "random_state": random_state,
    }

    return prediction, clusters, info
# =============================================================================
# ICA
# =============================================================================
def _cluster_ica(data, n_clusters=2, random_state=None, **kwargs):
    """Independent Component Analysis (ICA) for clustering.

    Each sample is assigned to the independent component on which it has the
    largest absolute loading; cluster centers are the mean of each group.
    """
    # Fit ICA with one component per desired cluster.
    # NOTE(review): passing whiten=True as a boolean is deprecated in newer
    # scikit-learn releases (string values expected) — confirm against the
    # project's supported sklearn versions.
    ica = sklearn.decomposition.FastICA(
        n_components=n_clusters,
        algorithm="parallel",
        whiten=True,
        fun="exp",
        random_state=random_state,
        **kwargs
    )
    ica = ica.fit(data)

    # Project the data onto the components; columns become "Loading_0", ...
    prediction = ica.transform(data)
    prediction = pd.DataFrame(prediction).add_prefix("Loading_")
    # Hard assignment: the column with the max absolute loading, then mapped
    # from column name to its integer position.
    prediction["Cluster"] = prediction.abs().idxmax(axis=1).values
    prediction["Cluster"] = [
        np.where(prediction.columns == state)[0][0] for state in prediction["Cluster"]
    ]

    # Copy function with given parameters so the clustering can be re-run identically.
    clustering_function = functools.partial(
        _cluster_ica, n_clusters=n_clusters, random_state=random_state, **kwargs
    )

    # Recover states from clustering (mean of each group's samples).
    clusters = _cluster_getclusters(data, prediction["Cluster"])

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "random_state": random_state,
    }

    return prediction, clusters, info
# =============================================================================
# SKLEARN
# =============================================================================
def _cluster_sklearn(data, method="spectral", n_clusters=2, **kwargs):
    """Clustering via scikit-learn (spectral / hierarchical / agglomerative)."""
    # Initialize clustering function
    if method in ["spectral"]:
        clustering_model = sklearn.cluster.SpectralClustering(n_clusters=n_clusters, **kwargs)
    elif method in ["hierarchical", "ward"]:
        clustering_model = sklearn.cluster.AgglomerativeClustering(
            n_clusters=n_clusters, linkage="ward", **kwargs
        )
    elif method in ["agglomerative", "single"]:
        clustering_model = sklearn.cluster.AgglomerativeClustering(
            n_clusters=n_clusters, linkage="single", **kwargs
        )
    else:
        # Bug fix: an unrecognized method previously fell through and raised an
        # opaque NameError on the undefined ``clustering_model``; fail fast with
        # an informative error instead.
        raise ValueError("NeuroKit error: _cluster_sklearn(): 'method' not recognized.")

    # Fit
    clustering = clustering_model.fit_predict(data)

    # Get representatives (mean of each group's samples).
    clusters = _cluster_getclusters(data, clustering)

    # Get distance of every sample to every cluster center.
    prediction = _cluster_quality_distance(data, clusters, to_dataframe=True)
    prediction["Cluster"] = clustering

    # Copy function with given parameters.
    # Bug fix: ``method`` is now bound too; previously re-running the partial
    # always fell back to the default spectral clustering.
    clustering_function = functools.partial(
        _cluster_sklearn, method=method, n_clusters=n_clusters, **kwargs
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "sklearn_model": clustering_model,
    }

    return prediction, clusters, info
def _cluster_mixture(data, n_clusters=2, bayesian=False, random_state=None, **kwargs):
    """Mixture model clustering.

    Fits a Gaussian mixture (or its Bayesian variant when ``bayesian=True``)
    and returns per-sample membership probabilities plus hard cluster labels.
    """
    # Initialize clustering function
    if bayesian is False:
        clustering_model = sklearn.mixture.GaussianMixture(
            n_components=n_clusters, random_state=random_state, **kwargs
        )
    else:
        clustering_model = sklearn.mixture.BayesianGaussianMixture(
            n_components=n_clusters, random_state=random_state, **kwargs
        )

    # Fit
    clustering = clustering_model.fit_predict(data)

    # Get representatives: the component means act as cluster centers.
    clusters = clustering_model.means_

    # Get probability of each sample belonging to each component.
    prediction = clustering_model.predict_proba(data)
    prediction = pd.DataFrame(prediction).add_prefix("Probability_")
    prediction["Cluster"] = clustering

    # Copy function with given parameters.
    # Bug fix: ``bayesian`` is now bound too; previously re-running the partial
    # silently reverted to the non-Bayesian Gaussian mixture.
    clustering_function = functools.partial(
        _cluster_mixture,
        n_clusters=n_clusters,
        bayesian=bayesian,
        random_state=random_state,
        **kwargs
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "sklearn_model": clustering_model,
        "random_state": random_state,
    }

    return prediction, clusters, info
# =============================================================================
# AAHC
# =============================================================================
def _cluster_aahc(
    data,
    n_clusters=2,
    gfp=None,
    gfp_peaks=None,
    gfp_sum_sq=None,
    random_state=None,
    use_peaks=False,
    **kwargs
):
    """Atomize and Agglomerate Hierarchical Clustering Algorithm, AAHC (Murray et al., Brain Topography, 2008),
    implemented by https://github.com/Frederic-vW/eeg_microstates/blob/master/eeg_microstates.py#L518

    Preprocessing steps of GFP computation are necessary for the algorithm to run. If gfp arguments are specified,
    data is assumed to have been filtered out based on gfp peaks (e.g., data[:, indices]), if not specified,
    gfp indices will be calculated in the algorithm and data is assumed to be the full un-preprocessed input.

    Starting from one cluster per map, the worst cluster (lowest global
    explained variance) is repeatedly atomized and its members re-assigned,
    until only ``n_clusters`` remain.
    """

    # Internal functions for aahc
    def extract_row(A, k):
        # Return A without row k, together with that row.
        v = A[k, :]
        A_ = np.vstack((A[:k, :], A[k + 1 :, :]))
        return A_, v

    def extract_item(A, k):
        # Return list A without item k, together with that item.
        a = A[k]
        A_ = A[:k] + A[k + 1 :]
        return A_, a

    def locmax(x):
        """Get local maxima of 1D-array

        Args:
            x: numeric sequence
        Returns:
            m: list, 1D-indices of local maxima
        """
        dx = np.diff(x)  # discrete 1st derivative
        zc = np.diff(np.sign(dx))  # zero-crossings of dx
        m = 1 + np.where(zc == -2)[0]  # indices of local max.
        return m

    # Sanitize
    if isinstance(data, pd.DataFrame):
        data = np.array(data)

    _, nch = data.shape

    # If preprocessing is not Done already: compute GFP (per-sample std across
    # channels), its local maxima, and the GEV normalizing constant.
    if gfp is None and gfp_peaks is None and gfp_sum_sq is None:
        gfp = data.std(axis=1)
        gfp_peaks = locmax(gfp)
        gfp_sum_sq = np.sum(gfp**2)  # normalizing constant in GEV
        if use_peaks:
            maps = data[gfp_peaks, :]  # initialize clusters
            cluster_data = data[gfp_peaks, :]  # store original gfp peak indices
        else:
            maps = data.copy()
            cluster_data = data.copy()
    else:
        maps = data.copy()
        cluster_data = data.copy()

    n_maps = maps.shape[0]

    # cluster indices w.r.t. original size, normalized GFP peak data
    Ci = [[k] for k in range(n_maps)]

    # Main loop: atomize + agglomerate
    while n_maps > n_clusters:
        # correlations of the data sequence with each cluster
        m_x, s_x = data.mean(axis=1, keepdims=True), data.std(axis=1)
        m_y, s_y = maps.mean(axis=1, keepdims=True), maps.std(axis=1)
        s_xy = 1.0 * nch * np.outer(s_x, s_y)
        C = np.dot(data - m_x, np.transpose(maps - m_y)) / s_xy

        # microstate sequence, ignore polarity (squared correlation)
        L = np.argmax(C**2, axis=1)

        # GEV (global explained variance) of cluster k
        gev = np.zeros(n_maps)
        for k in range(n_maps):
            r = L == k
            gev[k] = np.sum(gfp[r] ** 2 * C[r, k] ** 2) / gfp_sum_sq

        # merge cluster with the minimum GEV
        imin = np.argmin(gev)

        # N => N-1: remove the worst map and re-assign each of its members to
        # the remaining map it correlates best with.
        maps, _ = extract_row(maps, imin)
        Ci, reC = extract_item(Ci, imin)
        re_cluster = []  # indices of updated clusters
        for k in reC:  # map index to re-assign
            c = cluster_data[k, :]
            m_x, s_x = maps.mean(axis=1, keepdims=True), maps.std(axis=1)
            m_y, s_y = c.mean(), c.std()
            s_xy = 1.0 * nch * s_x * s_y
            C = np.dot(maps - m_x, c - m_y) / s_xy
            inew = np.argmax(C**2)  # ignore polarity
            re_cluster.append(inew)
            Ci[inew].append(k)
        n_maps = len(Ci)

        # Update clusters
        re_cluster = list(set(re_cluster))  # unique list of updated clusters

        # re-clustering by eigenvector method: each updated map becomes the
        # dominant (unit-norm) eigenvector of its members' covariance.
        for i in re_cluster:
            idx = Ci[i]
            Vt = cluster_data[idx, :]
            Sk = np.dot(Vt.T, Vt)
            evals, evecs = np.linalg.eig(Sk)
            c = evecs[:, np.argmax(np.abs(evals))]
            c = np.real(c)
            maps[i] = c / np.sqrt(np.sum(c**2))

    # Get distance of every sample to the final maps, then hard-assign each
    # sample to the column (map) with the largest absolute value.
    prediction = _cluster_quality_distance(cluster_data, maps, to_dataframe=True)
    prediction["Cluster"] = prediction.abs().idxmax(axis=1).values
    prediction["Cluster"] = [
        np.where(prediction.columns == state)[0][0] for state in prediction["Cluster"]
    ]

    # Function to re-run the same clustering.
    clustering_function = functools.partial(
        _cluster_aahc, n_clusters=n_clusters, random_state=random_state, **kwargs
    )

    # Info dump
    info = {
        "n_clusters": n_clusters,
        "clustering_function": clustering_function,
        "random_state": random_state,
    }

    return prediction, maps, info
# =============================================================================
# =============================================================================
# # Utils
# =============================================================================
# =============================================================================
def _cluster_getclusters(data, clustering):
    """Compute each cluster's representative as the mean of its member samples."""
    n_clusters = len(np.unique(clustering))
    centroids = []
    for label in range(n_clusters):
        members = data[np.where(clustering == label)]
        centroids.append(np.mean(members, axis=0))
    return np.asarray(centroids)
| 28,345 | 35.812987 | 116 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/cluster_findnumber.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .cluster import cluster
from .cluster_quality import cluster_quality
def cluster_findnumber(data, method="kmeans", n_max=10, show=False, **kwargs):
    """**Optimal Number of Clusters**

    Find the optimal number of clusters based on different indices of quality of fit.

    Parameters
    ----------
    data : np.ndarray
        An array (channels, times) of M/EEG data.
    method : str
        The clustering algorithm to be passed into :func:`.nk.cluster`.
    n_max : int
        Runs the clustering algorithm for each cluster count from 1 up to
        ``n_max - 1`` (exclusive upper bound), producing quality metrics for
        each cluster number.
    show : bool
        Plot indices normalized on the same scale.
    **kwargs
        Other arguments to be passed into :func:`.nk.cluster` and :func:`.nk.cluster_quality`.

    Returns
    -------
    DataFrame
        The different quality scores for each number of clusters:

        * Score_Silhouette
        * Score_Calinski
        * Score_Bouldin
        * Score_VarianceExplained
        * Score_GAP
        * Score_GAPmod
        * Score_GAP_diff
        * Score_GAPmod_diff

    See Also
    --------
    cluster, cluster_quality

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Load the iris dataset
      data = nk.data("iris").drop("Species", axis=1)

      # How many clusters
      @savefig p_cluster_findnumber1.png scale=100%
      results = nk.cluster_findnumber(data, method="kmeans", show=True)
      @suppress
      plt.close()

    """
    results = []
    # Run the clustering once per candidate cluster count (1 .. n_max - 1).
    for i in range(1, n_max):
        # Cluster
        clustering, clusters, info = cluster(data, method=method, n_clusters=i, **kwargs)

        # Compute indices of clustering quality
        _, quality = cluster_quality(data, clustering, clusters, info, **kwargs)
        results.append(quality)

    # One row of quality scores per cluster count.
    results = pd.concat(results, axis=0).reset_index(drop=True)

    # Gap Diff: compares each GAP statistic against the next cluster count's
    # value minus its standard error (Tibshirani's selection criterion).
    results["Score_GAP_diff"] = (
        results["Score_GAP"] - results["Score_GAP"].shift(-1) + results["Score_GAP_sk"].shift(-1)
    )
    results["Score_GAPmod_diff"] = (
        results["Score_GAPmod"]
        - results["Score_GAPmod"].shift(-1)
        + results["Score_GAPmod_sk"].shift(-1)
    )
    # The standard-error columns are intermediate values only.
    results = results.drop(["Score_GAP_sk", "Score_GAPmod_sk"], axis=1)

    if show is True:
        # Min-max normalize each score so they can share one plot axis.
        normalized = (results - results.min()) / (results.max() - results.min())
        normalized["n_Clusters"] = np.rint(np.arange(1, n_max))
        normalized.columns = normalized.columns.str.replace("Score", "Normalized")
        normalized.plot(x="n_Clusters")

    return results
| 2,697 | 28.977778 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/fit_loess.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.linalg
def fit_loess(y, X=None, alpha=0.75, order=2):
    """**Local Polynomial Regression (LOESS)**

    Performs a LOWESS (LOcally WEighted Scatter-plot Smoother) regression.

    Parameters
    ----------
    y : Union[list, np.array, pd.Series]
        The response variable (the y axis).
    X : Union[list, np.array, pd.Series]
        Explanatory variable (the x axis). If ``None``, will treat y as a continuous signal (useful
        for smoothing).
    alpha : float
        The parameter which controls the degree of smoothing, which corresponds to the proportion
        of the samples to include in local regression.
    order : int
        Degree of the polynomial to fit. Can be 1 or 2 (default).

    Returns
    -------
    array
        Prediction of the LOESS algorithm.
    dict
        Dictionary containing additional information such as the parameters (``order`` and
        ``alpha``).

    See Also
    ----------
    signal_smooth, signal_detrend, fit_error

    Examples
    ---------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      # Simulate Signal
      signal = np.cos(np.linspace(start=0, stop=10, num=1000))

      # Add noise to signal
      distorted = nk.signal_distort(signal,
                                    noise_amplitude=[0.3, 0.2, 0.1],
                                    noise_frequency=[5, 10, 50])

      # Smooth signal using local regression
      @savefig p_fit_loess1.png scale=100%
      pd.DataFrame({ "Raw": distorted, "Loess_1": nk.fit_loess(distorted, order=1)[0],
                     "Loess_2": nk.fit_loess(distorted, order=2)[0]}).plot()
      @suppress
      plt.close()

    References
    ----------
    * https://simplyor.netlify.com/loess-from-scratch-in-python-animation.en-us/

    """
    # Bug fix: convert inputs to float arrays. The fancy indexing below
    # (``y[ind[:span]]``) raised a TypeError for plain lists even though the
    # docstring advertises list support, and behaved label-wise (not
    # positionally) for pandas Series.
    y = np.asarray(y, dtype=float)
    if X is None:
        X = np.linspace(0, 100, len(y))
    else:
        X = np.asarray(X, dtype=float)

    assert order in [1, 2], "Deg has to be 1 or 2"
    assert 0 < alpha <= 1, "Alpha has to be between 0 and 1"
    assert len(X) == len(y), "Length of X and y are different"

    n = len(X)
    # Number of neighbours included in each local fit.
    span = int(np.ceil(alpha * n))

    y_predicted = np.zeros(n)

    for i, val in enumerate(X):
        # Select the `span` nearest neighbours of the current point.
        distance = abs(X - val)
        sorted_dist = np.sort(distance)
        ind = np.argsort(distance)

        Nx = X[ind[:span]]
        Ny = y[ind[:span]]

        # Tricube weights: 1 at the point itself, 0 at the furthest neighbour.
        delx0 = sorted_dist[span - 1]
        u = distance[ind[:span]] / delx0
        w = (1 - u**3) ** 3
        W = np.diag(w)
        A = np.vander(Nx, N=1 + order)

        # Weighted least squares via the normal equations, solved with QR.
        V = np.matmul(np.matmul(A.T, W), A)
        Y = np.matmul(np.matmul(A.T, W), Ny)
        Q, R = scipy.linalg.qr(V)
        p = scipy.linalg.solve_triangular(R, np.matmul(Q.T, Y))
        y_predicted[i] = np.polyval(p, val)
        # (The original also accumulated an ``x_space`` array here that was
        # never used or returned; that dead accumulator has been removed.)

    return y_predicted, {"alpha": alpha, "order": order}
| 2,919 | 27.349515 | 102 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/standardize.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning
from ..misc.check_type import is_string
from .mad import mad
def standardize(data, robust=False, window=None, **kwargs):
    """**Standardization of data**

    Performs a standardization of data (Z-scoring), i.e., centering and scaling, so that the data is
    expressed in terms of standard deviation (i.e., mean = 0, SD = 1) or Median Absolute Deviance
    (median = 0, MAD = 1).

    Parameters
    ----------
    data : Union[list, np.array, pd.Series]
        Raw data.
    robust : bool
        If ``True``, centering is done by subtracting the median from the variables and dividing
        it by the median absolute deviation (MAD). If ``False``, variables are standardized by
        subtracting the mean and dividing it by the standard deviation (SD).
    window : int
        Perform a rolling window standardization, i.e., apply a standardization on a window of the
        specified number of samples that rolls along the main axis of the signal. Can be used for
        complex detrending.
    **kwargs : optional
        Other arguments to be passed to :func:`.pandas.rolling`.

    Returns
    ----------
    list
        The standardized values.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import pandas as pd

      # Simple example
      nk.standardize([3, 1, 2, 4, 6, np.nan])
      nk.standardize([3, 1, 2, 4, 6, np.nan], robust=True)
      nk.standardize(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).T)
      nk.standardize(pd.DataFrame({"A": [3, 1, 2, 4, 6, np.nan],
                                   "B": [3, 1, 2, 4, 6, 5]}))

      # Rolling standardization of a signal
      signal = nk.signal_simulate(frequency=[0.1, 2], sampling_rate=200)
      z = nk.standardize(signal, window=200)
      @savefig p_standardize1.png scale=100%
      nk.signal_plot([signal, z], standardize=True)
      @suppress
      plt.close()

    """
    # Dispatch on input type so the output matches (list in -> list out, etc.).
    if isinstance(data, list):
        # Lists containing any string cannot be standardized; return unchanged.
        if any(is_string(data)):
            out = data
            # NOTE(review): the two adjacent string literals concatenate without
            # a space, so the emitted message reads "...standardized.Some
            # elements..." — consider fixing the message text upstream.
            warn(
                "The data is not standardized."
                "Some elements in the list is of string type.",
                category=NeuroKitWarning
            )
        else:
            out = list(_standardize(np.array(data), robust=robust, window=window, **kwargs))
    elif isinstance(data, pd.DataFrame):
        # only standardize columns that are not string and are not nan
        _data = data.loc[:, ~is_string(data) & ~np.array(data.isnull().all())]
        # Keep the skipped (string / all-NaN) columns so they can be re-attached.
        to_append = data.loc[:, is_string(data) | np.array(data.isnull().all())]
        out = pd.DataFrame(_standardize(_data, robust=robust, window=window, **kwargs))
        out = pd.concat([to_append, out], axis=1)
    elif isinstance(data, pd.Series):
        # String Series cannot be standardized; return unchanged.
        if is_string(data):
            out = data
            warn(
                "The data is not standardized as it is of string type.",
                category=NeuroKitWarning)
        else:
            out = pd.Series(_standardize(data, robust=robust, window=window, **kwargs))
    else:
        # Fallback for arrays and other array-likes.
        if is_string(data):
            out = data
            warn(
                "The data is not standardized as it is of string type.",
                category=NeuroKitWarning)
        else:
            out = _standardize(data, robust=robust, window=window, **kwargs)

    return out
# =============================================================================
# Internals
# =============================================================================
def _standardize(data, robust=False, window=None, **kwargs):
# Compute standardized on whole data
if window is None:
if robust is False:
z = (data - np.nanmean(data, axis=0)) / np.nanstd(data, axis=0, ddof=1)
else:
z = (data - np.nanmedian(data, axis=0)) / mad(data)
# Rolling standardization on windows
else:
df = pd.DataFrame(data) # Force dataframe
if robust is False:
z = (df - df.rolling(window, min_periods=0, **kwargs).mean()) / df.rolling(
window, min_periods=0, **kwargs
).std(ddof=1)
else:
z = (df - df.rolling(window, min_periods=0, **kwargs).median()) / df.rolling(
window, min_periods=0, **kwargs
).apply(mad)
# Fill the created nans
z = z.fillna(method="bfill")
# Restore to vector or array
if z.shape[1] == 1:
z = z[0].values
else:
z = z.values
return z
| 4,654 | 32.014184 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/rescale.py | # -*- coding: utf-8 -*-
import numpy as np
def rescale(data, to=[0, 1], scale=None):
    """**Rescale data**

    Rescale a numeric variable to a new range.

    Parameters
    ----------
    data : Union[list, np.array, pd.Series]
        Raw data.
    to : list
        New range of values of the data after rescaling.
    scale : list
        A list or tuple of two values specifying the actual range of the data.
        If ``None``, the minimum and the maximum of the provided data will be used.

    Returns
    ----------
    list
        The rescaled values.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      nk.rescale([3, 1, 2, 4, 6], to=[0, 1])

    """
    # Lists are converted to arrays for the computation and converted back
    # afterwards, so the caller receives the same type that was passed in.
    if isinstance(data, list):
        return list(_rescale(np.array(data), to=to, scale=scale))
    return _rescale(data, to=to, scale=scale)
# =============================================================================
# Internals
# =============================================================================
def _rescale(data, to=[0, 1], scale=None):
if scale is None:
scale = [np.nanmin(data), np.nanmax(data)]
return (to[1] - to[0]) / (scale[1] - scale[0]) * (data - scale[0]) + to[0]
| 1,291 | 22.925926 | 79 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/fit_polynomial.py | # -*- coding: utf-8 -*-
import numpy as np
import sklearn.linear_model
import sklearn.metrics
from .fit_error import fit_rmse
def fit_polynomial(y, X=None, order=2, method="raw"):
    """**Polynomial Regression**

    Performs a polynomial regression of given order.

    Parameters
    ----------
    y : Union[list, np.array, pd.Series]
        The response variable (the y axis).
    X : Union[list, np.array, pd.Series]
        Explanatory variable (the x axis). If ``None``, will treat y as a continuous signal.
    order : int
        The order of the polynomial. 0, 1 or > 1 for a baseline, linear or polynomial fit,
        respectively. Can also be ``"auto"``, in which case it will attempt to find the optimal
        order to minimize the RMSE.
    method : str
        If ``"raw"`` (default), compute standard polynomial coefficients. If ``"orthogonal"``,
        compute orthogonal polynomials (and is equivalent to R's ``poly`` default behavior).

    Returns
    -------
    array
        Prediction of the regression.
    dict
        Dictionary containing additional information such as the parameters (``order``) used and
        the coefficients (``coefs``), as well as the ``R2`` of the fit.

    See Also
    ----------
    signal_detrend, fit_error, fit_polynomial_findorder
    """
    # Default x axis: evenly spaced grid, treating y as a continuous signal.
    if X is None:
        X = np.linspace(0, 100, len(y))

    # Any string value (e.g. "auto") triggers a search for the best order.
    if isinstance(order, str):
        order = fit_polynomial_findorder(y, X, max_order=6)

    # Fit with the requested parameterization.
    if method == "raw":
        y_predicted, coefs = _fit_polynomial(y, X, order=order)
    else:
        y_predicted, coefs = _fit_polynomial_orthogonal(y, X, order=order)

    info = {
        "order": order,
        "coefs": coefs,
        "R2": sklearn.metrics.r2_score(y, y_predicted),
    }
    return y_predicted, info
# =============================================================================
# Find order
# =============================================================================
def fit_polynomial_findorder(y, X=None, max_order=6):
    """Polynomial Regression.

    Find the order in ``range(max_order)`` that minimizes the training RMSE of
    a polynomial fit. Note that, without cross-validation or a penalty term,
    a higher order can never fit the training data worse, so this tends to
    favor larger orders unless a lower order already achieves the minimum.

    Parameters
    ----------
    y : Union[list, np.array, pd.Series]
        The response variable (the y axis).
    X : Union[list, np.array, pd.Series]
        Explanatory variable (the x axis). If 'None', will treat y as a continuous signal.
    max_order : int
        The maximum order to test (exclusive upper bound).

    Returns
    -------
    int
        Optimal order.

    See Also
    ----------
    fit_polynomial
    """
    # TODO: add cross-validation or some kind of penalty to prevent over-fitting?
    if X is None:
        X = np.linspace(0, 100, len(y))

    # Track the lowest RMSE seen so far. The previous implementation never
    # updated ``best_rmse`` (it stayed 0), so the ``best_rmse == 0`` clause was
    # always true and the last tested order was returned regardless of fit.
    best_order = 0
    best_rmse = np.inf
    for order in range(max_order):
        coefs = np.polyfit(X, y, order)
        # RMSE = sqrt(SSE / n), identical to fit_rmse() on the prediction.
        rmse = np.sqrt(np.mean((np.asarray(y) - np.polyval(coefs, X)) ** 2))
        if rmse < best_rmse:
            best_rmse = rmse
            best_order = order
    return best_order
# =============================================================================
# Internals
# =============================================================================
def _fit_polynomial(y, X, order=2):
coefs = np.polyfit(X, y, order)
# Generating weights and model for polynomial function with a given degree
y_predicted = np.polyval(coefs, X)
return y_predicted, coefs
def _fit_polynomial_orthogonal(y, X, order=2):
    """Fit an orthogonal polynomial regression in Python (equivalent to R's poly()).

    Example (requires scikit-learn's iris dataset)::

        from sklearn.datasets import load_iris
        import pandas as pd
        df = load_iris()
        df = pd.DataFrame(data=df.data, columns=df.feature_names)
        y = df.iloc[:, 0].values  # Sepal.Length
        X = df.iloc[:, 1].values  # Sepal.Width
        _fit_polynomial_orthogonal(y, X, order=2)  # doctest: +SKIP
        # Equivalent to R's:
        # coef(lm(Sepal.Length ~ poly(Sepal.Width, 2), data=iris))
    """
    # Build the raw design matrix [1, X, X^2, ..., X^order] (Vandermonde-like).
    X = np.transpose([X**k for k in range(order + 1)])
    # QR decomposition yields orthonormal columns spanning the same space; the
    # first (constant) column is dropped, mirroring R's poly() which leaves the
    # intercept to the linear model below.
    X = np.linalg.qr(X)[0][:, 1:]
    model = sklearn.linear_model.LinearRegression().fit(X, y)
    # Return predictions plus coefficients with the intercept prepended.
    return model.predict(X), np.insert(model.coef_, 0, model.intercept_)
| 4,961 | 29.819876 | 126 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/mad.py | # -*- coding: utf-8 -*-
import numpy as np
def mad(x, constant=1.4826, **kwargs):
    """**Median Absolute Deviation: a "robust" version of standard deviation**

    Parameters
    ----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    constant : float
        Scale factor. Use 1.4826 for results similar to default R.
    **kwargs
        Extra arguments passed to ``np.nanmedian``.

    Returns
    ----------
    float
        The MAD.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk
      nk.mad([2, 8, 7, 5, 4, 12, 5, 1])

    References
    -----------
    * https://en.wikipedia.org/wiki/Median_absolute_deviation

    """
    # Masked entries are dropped before locating the center, while the absolute
    # deviations are computed on the raw input (matching the original design).
    center = np.nanmedian(np.ma.array(x).compressed(), **kwargs)
    deviations = np.abs(x - center)
    return np.nanmedian(deviations, **kwargs) * constant
| 815 | 21.054054 | 78 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/fit_error.py | # -*- coding: utf-8 -*-
import numpy as np
def fit_error(y, y_predicted, n_parameters=2):
    """**Calculate the fit error for a model**

    Also specific and direct access functions can be used, such as :func:`.fit_mse`,
    :func:`.fit_rmse` and :func:`.fit_r2`.

    Parameters
    ----------
    y : Union[list, np.array, pd.Series]
        The response variable (the y axis).
    y_predicted : Union[list, np.array, pd.Series]
        The fitted data generated by a model.
    n_parameters : int
        Number of model parameters (for the degrees of freedom used in R2).

    Returns
    -------
    dict
        A dictionary containing different indices of fit error
        (``SSE``, ``MSE``, ``RMSE``, ``R2``, ``R2_adjusted``).

    See Also
    --------
    fit_mse, fit_rmse, fit_r2

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      y = np.array([-1.0, -0.5, 0, 0.5, 1])
      y_predicted = np.array([0.0, 0, 0, 0, 0])

      # Master function
      x = nk.fit_error(y, y_predicted)
      x

      # Direct access
      nk.fit_mse(y, y_predicted)
      nk.fit_rmse(y, y_predicted)
      nk.fit_r2(y, y_predicted, adjusted=False)
      nk.fit_r2(y, y_predicted, adjusted=True, n_parameters=2)

    """
    # Get information
    SSE, n, df = _fit_error_prepare(y, y_predicted, n_parameters)
    # Mean squared error
    MSE = SSE / n
    # Root mean squared error
    RMSE = np.sqrt(SSE / n)
    # Adjusted r-squared
    # For optimization use 1 - adjR2 since we want to minimize the function
    # NOTE(review): ``SST`` is computed as SD * n rather than the usual sum of
    # squared deviations from the mean -- confirm this is intentional.
    SST = np.std(y) * n
    # Get R2
    # NOTE(review): the conventional coefficient of determination is
    # 1 - SSE/SST; here R2 = SSE/SST (and is set to 1 when SST == 0), which
    # looks like a minimization target rather than classical R2 -- verify
    # against callers before changing.
    if SST == 0:
        R2 = 1
    else:
        R2 = SSE / SST
    # R2 adjusted
    # NOTE(review): ``1 - (1 - (1 - R2))`` algebraically simplifies to ``R2``,
    # so this equals 1 - R2 * (n - 1) / df -- confirm the intended formula.
    R2_adjusted = 1 - (1 - (1 - R2)) * (n - 1) / df
    return {"SSE": SSE, "MSE": MSE, "RMSE": RMSE, "R2": R2, "R2_adjusted": R2_adjusted}
# =============================================================================
# Direct accessors
# =============================================================================
def fit_mse(y, y_predicted):
    """Compute Mean Square Error (MSE)."""
    metrics = fit_error(y, y_predicted)
    return metrics["MSE"]
def fit_rmse(y, y_predicted):
    """Compute Root Mean Square Error (RMSE)."""
    metrics = fit_error(y, y_predicted)
    return metrics["RMSE"]
def fit_r2(y, y_predicted, adjusted=True, n_parameters=2):
    """Compute R2 (adjusted by default)."""
    key = "R2_adjusted" if adjusted is True else "R2"
    return fit_error(y, y_predicted, n_parameters=n_parameters)[key]
# =============================================================================
# Internals
# =============================================================================
def _fit_error_prepare(y, y_predicted, n_parameters=2):
# n, i.e., how many observations (signal length)
n = len(y)
# Sanitize
if n != len(y_predicted):
raise TypeError("NeuroKit error: fit_error(): 'y' and 'y_predicted' are not of the same length.")
# Residual, i.e. the difference between data and model
residual = y - y_predicted
# Degrees of freedom, i.e., number of observations (length of signal) minus number of parameters
df = n - n_parameters
# Calculate sum of squared errors
SSE = np.sum(residual ** 2)
return SSE, n, df
| 3,199 | 25.666667 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/__init__.py | """Submodule for NeuroKit."""
from .cluster import cluster
from .cluster_findnumber import cluster_findnumber
from .cluster_quality import cluster_quality
from .correlation import cor
from .density import density
from .density_bandwidth import density_bandwidth
from .distance import distance
from .fit_error import fit_error, fit_mse, fit_r2, fit_rmse
from .fit_loess import fit_loess
from .fit_mixture import fit_mixture
from .fit_polynomial import fit_polynomial, fit_polynomial_findorder
from .hdi import hdi
from .mad import mad
from .rescale import rescale
from .standardize import standardize
from .summary import summary_plot

# Explicit public API of the `stats` submodule; keep in sync with the
# imports above when adding or removing functions.
__all__ = [
    "standardize",
    "hdi",
    "mad",
    "cor",
    "density",
    "density_bandwidth",
    "distance",
    "rescale",
    "fit_loess",
    "fit_polynomial",
    "fit_polynomial_findorder",
    "fit_mixture",
    "fit_error",
    "fit_mse",
    "fit_rmse",
    "fit_r2",
    "summary_plot",
    "cluster",
    "cluster_quality",
    "cluster_findnumber",
]
NeuroKit | NeuroKit-master/neurokit2/stats/distance.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy
import scipy.spatial
from .standardize import standardize
def distance(X=None, method="mahalanobis"):
    """**Distance**

    Compute distance using different metrics.

    Parameters
    ----------
    X : array or DataFrame
        A dataframe of values.
    method : str
        The method to use. One of ``"mahalanobis"`` or ``"mean"`` (aliases ``"center"``,
        ``"average"``) for the average distance from the mean.

    Returns
    -------
    array
        Vector containing the distance values.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      # Load the iris dataset
      data = nk.data("iris").drop("Species", axis=1)
      data["Distance"] = nk.distance(data, method="mahalanobis")

    """
    # Coerce arrays/lists into a DataFrame so both backends share one code path.
    if not isinstance(X, pd.DataFrame):
        X = pd.DataFrame(X)

    method = method.lower()  # remove capitalised letters
    if method in ["mahalanobis"]:
        dist = _distance_mahalanobis(X)
    elif method in ["mean", "center", "average"]:
        dist = _distance_mean(X)
    else:
        # Error message fixed: it previously listed only 'mahalanobis'
        # although 'mean'/'center'/'average' are also accepted.
        raise ValueError(
            "NeuroKit error: distance(): 'method' should be one of "
            "'mahalanobis', 'mean', 'center' or 'average'."
        )
    return dist
# =============================================================================
# Methods
# =============================================================================
def _distance_mahalanobis(X=None):
cov = X.cov().values
cov = scipy.linalg.inv(cov)
col_means = X.mean().values
dist = np.full(len(X), np.nan)
for i in range(len(X)):
dist[i] = scipy.spatial.distance.mahalanobis(X.iloc[i, :].values, col_means, cov) ** 2
return dist
def _distance_mean(X=None):
    """Average standardized deviation of each row from the variable means."""
    standardized = standardize(X)
    return standardized.mean(axis=1).values
| 2,251 | 24.590909 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/summary.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
from .density import density
from .rescale import rescale
def summary_plot(x, errorbars=0, **kwargs):
    """**Descriptive plot**

    Visualize a distribution with density, histogram, boxplot and rugs plots all at once.

    Parameters
    ----------
    x : array-like
        The vector of values to visualize.
    errorbars : float
        If > 0, interpreted as a confidence level for normal-approximation
        error bars drawn on the histogram counts.
    **kwargs
        Forwarded to ``np.histogram``, ``density`` and several matplotlib
        calls (see NOTE below). May also contain ``ax`` to draw on an
        existing axis.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import numpy as np

      x = np.random.normal(size=100)

      @savefig p_summary1.png scale=100%
      fig = nk.summary_plot(x)
      @suppress
      plt.close()

    """
    # NOTE(review): **kwargs is fanned out to np.histogram, density(), and
    # multiple matplotlib calls below; a keyword valid for one of them (e.g.
    # ``bins``) will raise in the others -- confirm intended usage.
    # Draw on the provided axis if any; otherwise create a fresh figure.
    if "ax" in kwargs:
        fig = None
        ax = kwargs.get("ax")
        kwargs.pop("ax")
    else:
        fig, ax = plt.subplots()
    # Histogram
    counts, bins = np.histogram(x, **kwargs)
    bin_centers = 0.5*(bins[1:] + bins[:-1])
    if errorbars > 0:
        # Normal-approximation error bars; ``errorbars`` acts as the
        # confidence level (e.g. 0.95). Note bins with zero counts produce a
        # division by zero here.
        samperr = np.std(counts) / np.sqrt(counts) * (st.norm.ppf(1-(1-errorbars)/2))
        ax.errorbar(bin_centers, counts, yerr=samperr, ecolor="#FF8C00", fmt='.', capsize=5, capthick=2)
    ax.hist(bins[:-1], bins, weights=counts, color="#2196F3", edgecolor="white", zorder=1, **kwargs)
    # Density (rescaled to the histogram's count scale so curves overlap)
    x_axis, y_axis = density(x, **kwargs)
    y_axis = rescale(y_axis, to=[0, np.max(counts)])
    ax.plot(x_axis, y_axis, color="#E91E63", linewidth=1.5, zorder=2, **kwargs)
    # Points (rug plot near the baseline)
    y_axis = np.full(len(x), 0.1)
    ax.scatter(x, y_axis, c="black", alpha=0.5, marker="|", zorder=3, **kwargs)
    # Boxplot (horizontal, positioned just above the rug)
    ax.boxplot(
        x,
        vert=False,
        positions=[np.max(counts) / 10],
        widths=np.max(counts) / 10,
        manage_ticks=False,
        boxprops=dict(linewidth=1.5),
        medianprops=dict(linewidth=1.5),
        whiskerprops=dict(linewidth=1.5),
        capprops=dict(linewidth=1.5),
        zorder=4,
        **kwargs
    )
    return fig
| 1,845 | 25.753623 | 104 | py |
NeuroKit | NeuroKit-master/neurokit2/stats/fit_mixture.py | # -*- coding: utf-8 -*-
import pandas as pd
import sklearn.mixture
def fit_mixture(X=None, n_clusters=2):
    """**Gaussian Mixture Model**

    Fit a Gaussian mixture and return, for each observation, the probability of belonging
    to each of the mixture components.

    Parameters
    ----------
    X : Union[list, np.array, pd.Series]
        The values to classify.
    n_clusters : int
        Number of components to look for.

    Returns
    -------
    pd.DataFrame
        DataFrame containing the probability of belonging to each cluster.
    dict
        Dictionary containing additional information such as the parameters (:func:`.n_clusters`).

    See Also
    ----------
    signal_detrend, fit_error

    Examples
    ---------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      x = nk.signal_simulate()
      probs, info = nk.fit_mixture(x, n_clusters=2)
      @savefig p_fit_mixture.png scale=100%
      fig = nk.signal_plot([x, probs["Cluster_0"], probs["Cluster_1"]], standardize=True)
      @suppress
      plt.close()

    """
    import numpy as np  # local import keeps module-level dependencies unchanged

    # Coerce lists/Series into an array: the docstring advertises list and
    # pd.Series input, but plain lists have no ``.ndim`` and modern pandas
    # Series have no ``.reshape``, so both previously crashed here.
    X = np.asarray(X)
    if X.ndim == 1:
        X = X.reshape(-1, 1)

    # fit a Gaussian Mixture Model with two components
    # (fixed random_state for reproducibility of the EM initialization)
    clf = sklearn.mixture.GaussianMixture(n_components=n_clusters, random_state=333)
    clf = clf.fit(X)

    # Get predicted probabilities
    predicted = clf.predict_proba(X)
    probabilities = pd.DataFrame(predicted).add_prefix("Cluster_")

    return probabilities, {"n_clusters": n_clusters}
| 1,465 | 25.178571 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_eventrelated.py | # -*- coding: utf-8 -*-
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_rate,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
def ppg_eventrelated(epochs, silent=False):
    """**Performs event-related PPG analysis on epochs**

    Parameters
    ----------
    epochs : Union[dict, pd.DataFrame]
        A dict containing one DataFrame per event/trial, usually obtained via
        :func:`.epochs_create`, or a DataFrame containing all epochs, usually obtained via
        :func:`.epochs_to_df`.
    silent : bool
        If ``True``, silence possible warnings.

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed PPG features for each epoch, with each epoch
        indicated by the `Label` column (if not present, by the `Index` column). Features include
        the baseline, max, min, mean and SD of the heart rate after stimulus onset
        (``PPG_Rate_*``), the times of the extrema, and experimental quadratic-trend parameters
        (``PPG_Rate_Trend_*``).

    See Also
    --------
    events_find, epochs_create, ppg_process

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      ppg, info = nk.ppg_process(nk.ppg_simulate(duration=20))
      epochs = nk.epochs_create(ppg, events=[5000, 10000, 15000],
                                epochs_start=-0.1, epochs_end=1.9)
      nk.ppg_eventrelated(epochs)

    """
    # Sanity checks
    epochs = _eventrelated_sanitizeinput(epochs, what="ppg", silent=silent)

    # Extract the rate-related features of each epoch, then attach epoch info.
    data = {}
    for key, epoch in epochs.items():
        features = _eventrelated_rate(epoch, {}, var="PPG_Rate")
        data[key] = _eventrelated_addinfo(epoch, features)

    # Return dataframe
    return _eventrelated_sanitizeoutput(data)
| 2,862 | 31.168539 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..misc import as_vector
from ..misc.report import create_report
from ..signal import signal_rate
from ..signal.signal_formatpeaks import _signal_from_indices
from .ppg_clean import ppg_clean
from .ppg_findpeaks import ppg_findpeaks
from .ppg_methods import ppg_methods
from .ppg_plot import ppg_plot
def ppg_process(
    ppg_signal, sampling_rate=1000, method="elgendi", report=None, **kwargs
):
    """**Process a photoplethysmogram (PPG) signal**

    Convenience function that automatically processes a photoplethysmogram signal.

    Parameters
    ----------
    ppg_signal : Union[list, np.array, pd.Series]
        The raw PPG channel.
    sampling_rate : int
        The sampling frequency of :func:`.ppg_signal` (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"elgendi"``.
        Defaults to ``"elgendi"``.
    report : str
        The filename of a report containing description and figures of processing
        (e.g. ``"myreport.html"``). Needs to be supplied if a report file
        should be generated. Defaults to ``None``. Can also be ``"text"`` to
        just print the text in the console without saving anything.
    **kwargs
        Other arguments to be passed to specific methods. For more information,
        see :func:`.ppg_methods`.

    Returns
    -------
    signals : DataFrame
        A DataFrame of same length as :func:`.ppg_signal` containing the columns
        ``"PPG_Raw"``, ``"PPG_Clean"``, ``"PPG_Rate"`` and ``"PPG_Peaks"``.
    info : dict
        A dictionary containing the information of peaks and the signals' sampling rate.

    See Also
    --------
    ppg_clean, ppg_findpeaks
    """
    # Sanitize input and resolve the per-step methods/keyword arguments.
    ppg_signal = as_vector(ppg_signal)
    methods = ppg_methods(sampling_rate=sampling_rate, method=method, **kwargs)

    # Clean signal
    cleaned = ppg_clean(
        ppg_signal,
        sampling_rate=sampling_rate,
        method=methods["method_cleaning"],
        **methods["kwargs_cleaning"]
    )

    # Find peaks
    info = ppg_findpeaks(
        cleaned,
        sampling_rate=sampling_rate,
        method=methods["method_peaks"],
        **methods["kwargs_peaks"]
    )
    info["sampling_rate"] = sampling_rate  # Add sampling rate in dict info

    # Binary peak vector aligned with the signal, plus instantaneous rate.
    peaks_signal = _signal_from_indices(
        info["PPG_Peaks"], desired_length=len(cleaned)
    )
    rate = signal_rate(
        info["PPG_Peaks"], sampling_rate=sampling_rate, desired_length=len(cleaned)
    )

    # Prepare output
    signals = pd.DataFrame(
        {
            "PPG_Raw": ppg_signal,
            "PPG_Clean": cleaned,
            "PPG_Rate": rate,
            "PPG_Peaks": peaks_signal,
        }
    )

    if report is not None:
        # Only build the figure when exporting to HTML; text reports skip it.
        if ".html" in str(report):
            fig = ppg_plot(signals, sampling_rate=sampling_rate)
        else:
            fig = None
        create_report(file=report, signals=signals, info=methods, fig=fig)

    return signals, info
| 3,664 | 29.289256 | 91 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_intervalrelated.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..hrv import hrv
def ppg_intervalrelated(data, sampling_rate=1000):
    """**Performs PPG analysis on longer periods of data (typically > 10 seconds), such as
    resting-state data**

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A DataFrame containing the different processed signal(s) as different columns, typically
        generated by :func:`.ppg_process`. Can also take a dict containing sets of separately
        processed DataFrames.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed PPG features: ``"PPG_Rate_Mean"`` (the mean heart
        rate) and the heart rate variability metrics (see :func:`.hrv` docstrings for details).

    See Also
    --------
    ppg_process
    """
    intervals = {}

    if isinstance(data, pd.DataFrame):
        # Single recording: expect exactly one heart-rate column.
        rate_cols = [col for col in data.columns if "PPG_Rate" in col]
        if len(rate_cols) != 1:
            raise ValueError(
                "NeuroKit error: ppg_intervalrelated(): Wrong input,"
                "we couldn't extract heart rate. Please make sure"
                "your DataFrame contains a `PPG_Rate` column."
            )
        intervals.update(_ppg_intervalrelated_formatinput(data))
        intervals.update(*_ppg_intervalrelated_hrv(data, sampling_rate))
        ppg_intervals = pd.DataFrame.from_dict(intervals, orient="index").T

    elif isinstance(data, dict):
        # One feature dict per epoch, keyed by the epoch identifier.
        for epoch, frame in data.items():
            features = {"Label": frame["Label"].iloc[0]}
            features.update(_ppg_intervalrelated_formatinput(frame))
            features.update(*_ppg_intervalrelated_hrv(frame, sampling_rate))
            intervals[epoch] = features
        ppg_intervals = pd.DataFrame.from_dict(intervals, orient="index")

    return ppg_intervals
# =============================================================================
# Internals
# =============================================================================
def _ppg_intervalrelated_formatinput(data):
# Sanitize input
colnames = data.columns.values
if "PPG_Rate" not in colnames:
raise ValueError(
"NeuroKit error: ppg_intervalrelated(): Wrong input,"
"we couldn't extract heart rate. Please make sure"
"your DataFrame contains a `PPG_Rate` column."
)
signal = data["PPG_Rate"].values
PPG_Rate_Mean = np.mean(signal)
return {"PPG_Rate_Mean": PPG_Rate_Mean}
def _ppg_intervalrelated_hrv(data, sampling_rate):
    """Compute HRV indices from the binary `PPG_Peaks` column."""
    if "PPG_Peaks" not in data.columns.values:
        raise ValueError(
            "NeuroKit error: ppg_intervalrelated(): Wrong input,"
            "we couldn't extract peaks. Please make sure"
            "your DataFrame contains a `PPG_Peaks` column."
        )

    # Convert the 0/1 "signal" representation into sample indices
    # (the "info" format expected by hrv()).
    peak_indices = {"PPG_Peaks": np.where(data["PPG_Peaks"].values)[0]}
    results = hrv(peak_indices, sampling_rate=sampling_rate)
    return results.astype("float").to_dict("records")
| 4,011 | 29.165414 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_plot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def ppg_plot(ppg_signals, sampling_rate=None, static=True):
    """**Visualize photoplethysmogram (PPG) data**

    Visualize the PPG signal processing.

    Parameters
    ----------
    ppg_signals : DataFrame
        DataFrame obtained from :func:`.ppg_process`.
    sampling_rate : int
        The sampling frequency of the PPG (in Hz, i.e., samples/second). Needs to be supplied if
        the data should be plotted over time in seconds. Otherwise the data is plotted over
        samples. Defaults to ``None``.
    static : bool
        If True, a static plot will be generated with matplotlib.
        If False, an interactive plot will be generated with plotly.
        Defaults to True.

    Returns
    -------
    fig
        Figure representing a plot of the processed PPG signals.

    Raises
    ------
    ValueError
        If ``ppg_signals`` is not the DataFrame returned by :func:`.ppg_process`.
    ImportError
        If ``static=False`` and plotly is not installed.

    See Also
    --------
    ppg_process
    """
    # Sanity-check input.
    if not isinstance(ppg_signals, pd.DataFrame):
        raise ValueError(
            "NeuroKit error: The `ppg_signals` argument must"
            " be the DataFrame returned by `ppg_process()`."
        )

    # X-axis: seconds when the sampling rate is known, raw samples otherwise.
    if sampling_rate is not None:
        x_axis = np.linspace(0, ppg_signals.shape[0] / sampling_rate, ppg_signals.shape[0])
    else:
        x_axis = np.arange(0, ppg_signals.shape[0])

    # Get peak indices
    peaks = np.where(ppg_signals["PPG_Peaks"] == 1)[0]

    if static:
        # Prepare figure
        fig, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, sharex=True)
        if sampling_rate is not None:
            ax0.set_xlabel("Time (seconds)")
            ax1.set_xlabel("Time (seconds)")
        elif sampling_rate is None:
            ax0.set_xlabel("Samples")
            ax1.set_xlabel("Samples")
        fig.suptitle("Photoplethysmogram (PPG)", fontweight="bold")
        plt.tight_layout(h_pad=0.4)
        # Plot cleaned and raw PPG
        ax0.set_title("Raw and Cleaned Signal")
        ax0.plot(x_axis, ppg_signals["PPG_Raw"], color="#B0BEC5", label="Raw", zorder=1)
        ax0.plot(
            x_axis,
            ppg_signals["PPG_Clean"],
            color="#FB1CF0",
            label="Cleaned",
            zorder=1,
            linewidth=1.5,
        )
        # Plot peaks
        ax0.scatter(
            x_axis[peaks],
            ppg_signals["PPG_Clean"][peaks],
            color="#D60574",
            label="Peaks",
            zorder=2,
        )
        ax0.legend(loc="upper right")
        # Rate
        ax1.set_title("Heart Rate")
        ppg_rate_mean = ppg_signals["PPG_Rate"].mean()
        ax1.plot(
            x_axis,
            ppg_signals["PPG_Rate"],
            color="#FB661C",
            label="Rate",
            linewidth=1.5,
        )
        ax1.axhline(y=ppg_rate_mean, label="Mean", linestyle="--", color="#FBB41C")
        ax1.legend(loc="upper right")
        return fig
    else:
        try:
            import plotly.graph_objects as go
            from plotly.subplots import make_subplots
        except ImportError as e:
            # Message fixed: the fragments were previously passed as three
            # separate arguments, so the exception carried a tuple instead of
            # a single readable message.
            raise ImportError(
                "NeuroKit error: ppg_plot(): the 'plotly'"
                " module is required when 'static' is False."
                " Please install it first (`pip install plotly`)."
            ) from e

        fig = make_subplots(
            rows=2,
            cols=1,
            shared_xaxes=True,
            subplot_titles=("Raw and Cleaned Signal", "Rate"),
        )
        # Plot cleaned and raw PPG
        fig.add_trace(go.Scatter(x=x_axis, y=ppg_signals["PPG_Raw"], name="Raw"), row=1, col=1)
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=ppg_signals["PPG_Clean"],
                name="Cleaned",
                marker_color="#FB1CF0",
            ),
            row=1,
            col=1,
        )
        # Plot peaks
        fig.add_trace(
            go.Scatter(
                x=x_axis[peaks],
                y=ppg_signals["PPG_Clean"][peaks],
                name="Peaks",
                mode="markers",
                marker_color="#D60574",
            ),
            row=1,
            col=1,
        )
        # Rate
        ppg_rate_mean = ppg_signals["PPG_Rate"].mean()
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=ppg_signals["PPG_Rate"],
                name="Rate",
                mode="lines",
                marker_color="#FB661C",
            ),
            row=2,
            col=1,
        )
        fig.add_hline(
            y=ppg_rate_mean,
            line_dash="dash",
            line_color="#FBB41C",
            name="Mean",
            row=2,
            col=1,
        )
        fig.update_layout(title_text="Photoplethysmogram (PPG)", height=500, width=750)
        if sampling_rate is not None:
            fig.update_xaxes(title_text="Time (seconds)", row=1, col=1)
            fig.update_xaxes(title_text="Time (seconds)", row=2, col=1)
        elif sampling_rate is None:
            fig.update_xaxes(title_text="Samples", row=1, col=1)
            fig.update_xaxes(title_text="Samples", row=2, col=1)
        return fig
| 5,587 | 28.723404 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_clean.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning, as_vector
from ..signal import signal_fillmissing, signal_filter
def ppg_clean(ppg_signal, sampling_rate=1000, heart_rate=None, method="elgendi"):
    """**Clean a photoplethysmogram (PPG) signal**

    Prepare a raw PPG signal for systolic peak detection.

    Parameters
    ----------
    ppg_signal : Union[list, np.array, pd.Series]
        The raw PPG channel.
    heart_rate : Union[int, float]
        The heart rate of the PPG signal. Applicable only if method is ``"nabian2018"`` to check
        that filter frequency is appropriate.
    sampling_rate : int
        The sampling frequency of the PPG (in Hz, i.e., samples/second). The default is 1000.
    method : str
        The processing pipeline to apply. Can be one of ``"elgendi"``, ``"nabian2018"``, or
        ``"none"``. The default is ``"elgendi"``. If ``"none"`` is passed, the raw signal will be
        returned without any cleaning.

    Returns
    -------
    clean : array
        A vector containing the cleaned PPG.

    See Also
    --------
    ppg_simulate, ppg_findpeaks

    References
    ----------
    * Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., & Ostadabbas, S. (2018).
      An open-source feature extraction tool for the analysis of peripheral physiological data.
      IEEE Journal of Translational Engineering in Health and Medicine, 6, 1-11.

    """
    ppg_signal = as_vector(ppg_signal)

    # Interpolate missing samples before filtering, warning the user about it.
    n_missing = np.sum(np.isnan(ppg_signal))
    if n_missing > 0:
        warn(
            "There are " + str(n_missing) + " missing data points in your signal."
            " Filling missing values using `signal_fillmissing`.",
            category=NeuroKitWarning,
        )
        ppg_signal = signal_fillmissing(ppg_signal, method="both")

    # Dispatch to the requested cleaning pipeline.
    key = str(method).lower()
    if key == "elgendi":
        return _ppg_clean_elgendi(ppg_signal, sampling_rate)
    if key == "nabian2018":
        return _ppg_clean_nabian2018(ppg_signal, sampling_rate, heart_rate=heart_rate)
    if key == "none":
        return ppg_signal
    raise ValueError(
        "`method` not found. Must be one of 'elgendi', 'nabian2018', or 'none'."
    )
# =============================================================================
# Methods
# =============================================================================
def _ppg_clean_elgendi(ppg_signal, sampling_rate):
    """Band-pass filter (0.5-8 Hz, 3rd-order Butterworth), following Elgendi et al. (2013)."""
    return signal_filter(
        ppg_signal,
        sampling_rate=sampling_rate,
        lowcut=0.5,
        highcut=8,
        order=3,
        method="butterworth",
    )
def _ppg_clean_nabian2018(ppg_signal, sampling_rate, heart_rate=None):
"""Low-pass filter for continuous BP signal preprocessing, adapted from Nabian et al. (2018)."""
# Determine low-pass filter value
highcut = 40
# Convert heart rate to seconds, check if low-pass filter within appropriate range
if heart_rate is not None:
heart_rate = heart_rate / 60
if not highcut >= 10 * heart_rate and not highcut < 0.5 * sampling_rate:
raise ValueError(
"Highcut value should be at least 10 times heart rate and"
" less than 0.5 times sampling rate."
)
filtered = signal_filter(
ppg_signal,
sampling_rate=sampling_rate,
lowcut=None,
highcut=highcut,
order=2,
method="butterworth",
)
return filtered
| 4,274 | 29.978261 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_findpeaks.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from ..signal import signal_smooth
def ppg_findpeaks(ppg_cleaned, sampling_rate=1000, method="elgendi", show=False, **kwargs):
    """**Find systolic peaks in a photoplethysmogram (PPG) signal**

    Parameters
    ----------
    ppg_cleaned : Union[list, np.array, pd.Series]
        The cleaned PPG channel as returned by :func:`.ppg_clean`.
    sampling_rate : int
        The sampling frequency of the PPG (in Hz, i.e., samples/second). The default is 1000.
    method : str
        The detection pipeline: ``"elgendi"`` (default) or ``"bishop"``.
    show : bool
        If ``True``, plot the detection thresholds/landmarks (debugging aid). Default ``False``.

    Returns
    -------
    info : dict
        Dictionary with the sample indices of the systolic peaks under the key ``"PPG_Peaks"``.

    See Also
    --------
    ppg_simulate, ppg_clean

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import matplotlib.pyplot as plt

      ppg = nk.ppg_simulate(heart_rate=75, duration=20, sampling_rate=50)
      ppg_clean = nk.ppg_clean(ppg, sampling_rate=50)

      @savefig p_ppg_findpeaks1.png scale=100%
      peaks = nk.ppg_findpeaks(ppg_clean, sampling_rate=100, show=True)
      @suppress
      plt.close()

      # Method by Bishop et al., (2018)
      @savefig p_ppg_findpeaks2.png scale=100%
      peaks = nk.ppg_findpeaks(ppg, method="bishop", show=True)
      @suppress
      plt.close()

    References
    ----------
    * Elgendi, M., Norton, I., Brearley, M., Abbott, D., & Schuurmans, D. (2013). Systolic peak
      detection in acceleration photoplethysmograms measured from emergency responders in tropical
      conditions. PloS one, 8(10), e76585.
    * Bishop, S. M., & Ercole, A. (2018). Multi-scale peak and trough detection optimised for
      periodic and quasi-periodic neuroscience data. In Intracranial Pressure & Neuromonitoring XVI
      (pp. 189-195). Springer International Publishing.

    """
    method = method.lower()

    if method == "elgendi":
        detected = _ppg_findpeaks_elgendi(ppg_cleaned, sampling_rate, show=show, **kwargs)
    elif method in ("msptd", "bishop2018", "bishop"):
        # Bishop's MSPTD also yields onsets, which are currently discarded here.
        detected, _ = _ppg_findpeaks_bishop(ppg_cleaned, show=show, **kwargs)
    else:
        raise ValueError("`method` not found. Must be one of the following: 'elgendi', 'bishop'.")

    # Package the output.
    return {"PPG_Peaks": detected}
def _ppg_findpeaks_elgendi(
    signal,
    sampling_rate=1000,
    peakwindow=0.111,
    beatwindow=0.667,
    beatoffset=0.02,
    mindelay=0.3,
    show=False,
):
    """Implementation of Elgendi M, Norton I, Brearley M, Abbott D, Schuurmans D (2013) Systolic Peak Detection in
    Acceleration Photoplethysmograms Measured from Emergency Responders in Tropical Conditions. PLoS ONE 8(10): e76585.
    doi:10.1371/journal.pone.0076585.

    All tune-able parameters are specified as keyword arguments. `signal` must be the bandpass-filtered raw PPG
    with a lowcut of .5 Hz, a highcut of 8 Hz.

    Parameters
    ----------
    signal : array
        Band-pass filtered PPG.
    sampling_rate : int
        Sampling frequency in Hz.
    peakwindow : float
        Width (in seconds) of the moving average used for the "peak" smoother;
        also used as the minimum wave length (threshold 2 in the paper).
    beatwindow : float
        Width (in seconds) of the moving average used for the "beat" smoother.
    beatoffset : float
        Fraction of the mean squared signal added to the beat average to form
        threshold 1.
    mindelay : float
        Minimum delay (in seconds) enforced between consecutive peaks.
    show : bool
        If True, plot signal, squared signal, thresholds and detected waves.

    Returns
    -------
    peaks : np.ndarray
        Sample indices of the detected systolic peaks.
    """
    if show:
        _, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, sharex=True)
        ax0.plot(signal, label="filtered")

    # Ignore the samples with negative amplitudes and square the samples with
    # values larger than zero (emphasizes systolic upstrokes).
    signal_abs = signal.copy()
    signal_abs[signal_abs < 0] = 0
    sqrd = signal_abs**2

    # Compute the thresholds for peak detection. Call with show=True in order
    # to visualize thresholds.
    ma_peak_kernel = int(np.rint(peakwindow * sampling_rate))
    ma_peak = signal_smooth(sqrd, kernel="boxcar", size=ma_peak_kernel)

    ma_beat_kernel = int(np.rint(beatwindow * sampling_rate))
    ma_beat = signal_smooth(sqrd, kernel="boxcar", size=ma_beat_kernel)

    thr1 = ma_beat + beatoffset * np.mean(sqrd)  # threshold 1

    if show:
        ax1.plot(sqrd, label="squared")
        ax1.plot(thr1, label="threshold")
        ax1.legend(loc="upper right")

    # Identify start and end of PPG waves: a wave begins where the peak
    # smoother rises above threshold 1 and ends where it falls back below.
    waves = ma_peak > thr1
    beg_waves = np.where(np.logical_and(np.logical_not(waves[0:-1]), waves[1:]))[0]
    end_waves = np.where(np.logical_and(waves[0:-1], np.logical_not(waves[1:])))[0]
    # Throw out wave-ends that precede first wave-start.
    end_waves = end_waves[end_waves > beg_waves[0]]

    # Identify systolic peaks within waves (ignore waves that are too short).
    num_waves = min(beg_waves.size, end_waves.size)
    min_len = int(np.rint(peakwindow * sampling_rate))  # this is threshold 2 in the paper
    min_delay = int(np.rint(mindelay * sampling_rate))
    # Sentinel 0 so `peaks[-1]` is defined on the first comparison; popped below.
    peaks = [0]

    for i in range(num_waves):

        beg = beg_waves[i]
        end = end_waves[i]
        len_wave = end - beg

        # Reject waves shorter than the peak window (threshold 2).
        if len_wave < min_len:
            continue

        # Visualize wave span.
        if show:
            ax1.axvspan(beg, end, facecolor="m", alpha=0.5)

        # Find local maxima and their prominence within wave span.
        data = signal[beg:end]
        locmax, props = scipy.signal.find_peaks(data, prominence=(None, None))

        if locmax.size > 0:
            # Identify most prominent local maximum.
            peak = beg + locmax[np.argmax(props["prominences"])]
            # Enforce minimum delay between peaks.
            if peak - peaks[-1] > min_delay:
                peaks.append(peak)

    # Drop the sentinel.
    peaks.pop(0)

    if show:
        ax0.scatter(peaks, signal_abs[peaks], c="r")
        ax0.legend(loc="upper right")
        ax0.set_title("PPG Peaks (Method by Elgendi et al., 2013)")

    peaks = np.asarray(peaks).astype(int)

    return peaks
def _ppg_findpeaks_bishop(
signal,
show=False,
):
"""Implementation of Bishop SM, Ercole A (2018) Multi-scale peak and trough detection optimised
for periodic and quasi-periodic neuroscience data. doi:10.1007/978-3-319-65798-1_39.
Currently designed for short signals of relatively low sampling frequencies (e.g. 6 seconds at
100 Hz). Also, the function currently only returns peaks, but it does identify pulse onsets too.
"""
# TODO: create ppg_peaks() that also returns onsets and stuff
# Setup
N = len(signal)
L = int(np.ceil(N / 2) - 1)
# Step 1: calculate local maxima and local minima scalograms
# - detrend: this removes the best-fit straight line
x = scipy.signal.detrend(signal, type="linear")
# - initialise LMS matrices
m_max = np.full((L, N), False)
m_min = np.full((L, N), False)
# - populate LMS matrices
for k in range(1, L): # scalogram scales
for i in range(k + 2, N - k + 1):
if x[i - 1] > x[i - k - 1] and x[i - 1] > x[i + k - 1]:
m_max[k - 1, i - 1] = True
if x[i - 1] < x[i - k - 1] and x[i - 1] < x[i + k - 1]:
m_min[k - 1, i - 1] = True
# Step 2: find the scale with the most local maxima (or local minima)
# - row-wise summation (i.e. sum each row)
gamma_max = np.sum(m_max, axis=1)
# the "axis=1" option makes it row-wise
gamma_min = np.sum(m_min, axis=1)
# - find scale with the most local maxima (or local minima)
lambda_max = np.argmax(gamma_max)
lambda_min = np.argmax(gamma_min)
# Step 3: Use lambda to remove all elements of m for which k>lambda
m_max = m_max[: (lambda_max + 1), :]
m_min = m_min[: (lambda_min + 1), :]
# Step 4: Find peaks (and onsets)
# - column-wise summation
m_max_sum = np.sum(m_max == False, axis=0)
m_min_sum = np.sum(m_min == False, axis=0)
peaks = np.where(m_max_sum == 0)[0].astype(int)
onsets = np.where(m_min_sum == 0)[0].astype(int)
if show:
_, ax0 = plt.subplots(nrows=1, ncols=1, sharex=True)
ax0.plot(signal, label="signal")
ax0.scatter(peaks, signal[peaks], c="r")
ax0.scatter(onsets, signal[onsets], c="b")
ax0.set_title("PPG Peaks (Method by Bishop et al., 2018)")
return peaks, onsets
| 8,147 | 33.820513 | 119 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_methods.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc.report import get_kwargs
from .ppg_clean import ppg_clean
from .ppg_findpeaks import ppg_findpeaks
def ppg_methods(
    sampling_rate=1000,
    method="elgendi",
    method_cleaning="default",
    method_peaks="default",
    **kwargs,
):
    """**PPG Preprocessing Methods**

    This function analyzes and specifies the methods used in the preprocessing, and create a
    textual description of the methods used. It is used by :func:`ppg_process()` to dispatch the
    correct methods to each subroutine of the pipeline and :func:`ppg_report()` to create a
    preprocessing report.

    Parameters
    ----------
    sampling_rate : int
        The sampling frequency of the raw PPG signal (in Hz, i.e., samples/second).
    method : str
        The method used for cleaning and peak finding if ``"method_cleaning"``
        and ``"method_peaks"`` are set to ``"default"``. Can be one of ``"elgendi"``.
        Defaults to ``"elgendi"``.
    method_cleaning: str
        The method used to clean the raw PPG signal. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.ppg_clean`.
    method_peaks: str
        The method used to find peaks. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.ppg_findpeaks`.
    **kwargs
        Other arguments to be passed to :func:`.ppg_clean` and
        :func:`.ppg_findpeaks`.

    Returns
    -------
    report_info : dict
        A dictionary containing the keyword arguments passed to the cleaning
        and peak finding functions, text describing the methods, and the corresponding
        references.

    See Also
    --------
    ppg_process, ppg_clean, ppg_findpeaks
    """
    # Sanitize inputs
    method_cleaning = (
        str(method).lower()
        if method_cleaning == "default"
        else str(method_cleaning).lower()
    )
    method_peaks = (
        str(method).lower() if method_peaks == "default" else str(method_peaks).lower()
    )

    # Create dictionary with all inputs
    report_info = {
        "sampling_rate": sampling_rate,
        "method": method,
        "method_cleaning": method_cleaning,
        "method_peaks": method_peaks,
        **kwargs,
    }

    # Get arguments to be passed to cleaning and peak finding functions
    kwargs_cleaning, report_info = get_kwargs(report_info, ppg_clean)
    kwargs_peaks, report_info = get_kwargs(report_info, ppg_findpeaks)

    # Save keyword arguments in dictionary
    report_info["kwargs_cleaning"] = kwargs_cleaning
    report_info["kwargs_peaks"] = kwargs_peaks

    # Initialize refs list with NeuroKit2 reference
    refs = ["""Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H.,
    Schölzel, C., & Chen, S. A. (2021). NeuroKit2: A Python toolbox for neurophysiological signal processing.
    Behavior Research Methods, 53(4), 1689–1696. https://doi.org/10.3758/s13428-020-01516-y
    """]

    # 1. Cleaning
    # ------------
    report_info["text_cleaning"] = f"The raw signal, sampled at {sampling_rate} Hz,"
    if method_cleaning in [
        "elgendi",
        "elgendi2013",
    ]:
        report_info["text_cleaning"] += (
            " was preprocessed using a bandpass filter ([0.5 - 8 Hz], Butterworth 3rd order;"
            + " following Elgendi et al., 2013)."
        )
        refs.append(
            """Elgendi M, Norton I, Brearley M, Abbott D, Schuurmans D (2013)
            Systolic Peak Detection in Acceleration Photoplethysmograms
            Measured from Emergency Responders in Tropical Conditions
            PLoS ONE 8(10): e76585. doi:10.1371/journal.pone.0076585."""
        )
    elif method_cleaning in ["nabian", "nabian2018"]:
        # `.get()` avoids a KeyError when `heart_rate` was not passed as a kwarg.
        if report_info.get("heart_rate") is None:
            cutoff = "of 40 Hz"
        else:
            cutoff = f' based on the heart rate of {report_info["heart_rate"]} bpm'
        # Bug fix: append (`+=`) rather than assign (`=`), so the
        # "The raw signal, sampled at ... Hz," prefix is kept (as in the
        # "elgendi" branch above).
        report_info["text_cleaning"] += (
            f" was preprocessed using a lowpass filter (with a cutoff frequency {cutoff},"
            + " butterworth 2nd order; following Nabian et al., 2018)."
        )
        refs.append(
            """Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., & Ostadabbas, S.(2018).
            An open-source feature extraction tool for the analysis of peripheral physiological data.
            IEEE Journal of Translational Engineering in Health and Medicine, 6, 1-11."""
        )
    elif method_cleaning in ["none"]:
        report_info[
            "text_cleaning"
        ] += " was directly used for peak detection without preprocessing."
    else:
        # just in case more methods are added
        # Bug fix: append (`+=`) rather than assign, preserving the prefix.
        report_info["text_cleaning"] += (
            " was cleaned following the " + method + " method."
        )

    # 2. Peaks
    # ----------
    # "elgendi2013" accepted as alias for consistency with the cleaning branch.
    if method_peaks in ["elgendi", "elgendi13", "elgendi2013"]:
        report_info[
            "text_peaks"
        ] = "The peak detection was carried out using the method described in Elgendi et al. (2013)."
        refs.append(
            """Elgendi M, Norton I, Brearley M, Abbott D, Schuurmans D (2013)
            Systolic Peak Detection in Acceleration Photoplethysmograms
            Measured from Emergency Responders in Tropical Conditions
            PLoS ONE 8(10): e76585. doi:10.1371/journal.pone.0076585."""
        )
    elif method_peaks in ["none"]:
        report_info["text_peaks"] = "There was no peak detection carried out."
    else:
        report_info[
            "text_peaks"
        ] = f"The peak detection was carried out using the method {method_peaks}."

    # Deduplicate references.
    report_info["references"] = list(np.unique(refs))
    return report_info
| 6,124 | 36.576687 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/__init__.py | """Submodule for NeuroKit."""
# Aliases
from ..signal import signal_rate as ppg_rate
from .ppg_analyze import ppg_analyze
from .ppg_clean import ppg_clean
from .ppg_eventrelated import ppg_eventrelated
from .ppg_findpeaks import ppg_findpeaks
from .ppg_intervalrelated import ppg_intervalrelated
from .ppg_methods import ppg_methods
from .ppg_plot import ppg_plot
from .ppg_process import ppg_process
from .ppg_simulate import ppg_simulate
__all__ = [
"ppg_simulate",
"ppg_clean",
"ppg_findpeaks",
"ppg_rate",
"ppg_process",
"ppg_plot",
"ppg_methods",
"ppg_intervalrelated",
"ppg_eventrelated",
"ppg_analyze",
]
| 654 | 23.259259 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_simulate.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from ..misc import check_random_state, check_random_state_children
from ..signal import signal_distort, signal_interpolate
def ppg_simulate(
    duration=120,
    sampling_rate=1000,
    heart_rate=70,
    frequency_modulation=0.2,
    ibi_randomness=0.1,
    drift=0,
    motion_amplitude=0.1,
    powerline_amplitude=0.01,
    burst_number=0,
    burst_amplitude=1,
    random_state=None,
    random_state_distort="spawn",
    show=False,
):
    """**Simulate a photoplethysmogram (PPG) signal**

    Phenomenological approximation of PPG. The PPG wave is described with four landmarks: wave
    onset, location of the systolic peak, location of the dicrotic notch and location of the
    diastolic peaks. These landmarks are defined as x and y coordinates (in a time series). These
    coordinates are then interpolated at the desired sampling rate to obtain the PPG signal.

    Parameters
    ----------
    duration : int
        Desired recording length in seconds. The default is 120.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second). The default is 1000.
    heart_rate : int
        Desired simulated heart rate (in beats per minute). The default is 70. Note that for the
        ECGSYN method, random fluctuations are to be expected to mimic a real heart rate. These
        fluctuations can cause some slight discrepancies between the requested heart rate and the
        empirical heart rate, especially for shorter signals.
    frequency_modulation : float
        Float between 0 and 1. Determines how pronounced respiratory sinus arrythmia (RSA) is
        (0 corresponds to absence of RSA). The default is 0.3.
    ibi_randomness : float
        Float between 0 and 1. Determines how much random noise there is in the duration of each
        PPG wave (0 corresponds to absence of variation). The default is 0.1.
    drift : float
        Float between 0 and 1. Determines how pronounced the baseline drift (.05 Hz) is
        (0 corresponds to absence of baseline drift). The default is 1.
    motion_amplitude : float
        Float between 0 and 1. Determines how pronounced the motion artifact (0.5 Hz) is
        (0 corresponds to absence of motion artifact). The default is 0.1.
    powerline_amplitude : float
        Float between 0 and 1. Determines how pronounced the powerline artifact (50 Hz) is
        (0 corresponds to absence of powerline artifact). Note that powerline_amplitude > 0 is only
        possible if ``sampling_rate`` is >= 500. The default is 0.1.
    burst_amplitude : float
        Float between 0 and 1. Determines how pronounced high frequency burst artifacts are
        (0 corresponds to absence of bursts). The default is 1.
    burst_number : int
        Determines how many high frequency burst artifacts occur. The default is 0.
    show : bool
        If ``True``, returns a plot of the landmarks and interpolated PPG. Useful for debugging.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.
    random_state_distort : {'legacy', 'spawn'}, None, int, numpy.random.RandomState or numpy.random.Generator
        Random state to be used to distort the signal. If ``"legacy"``, use the same random state used to
        generate the signal (discouraged as it creates dependent random streams). If ``"spawn"``, spawn
        independent children random number generators from the random_state argument. If any of the other types,
        generate independent children random number generators from the random_state_distort provided (this
        allows generating multiple version of the same signal distorted by different random noise realizations).

    Returns
    -------
    ppg : array
        A vector containing the PPG.

    See Also
    --------
    ecg_simulate, rsp_simulate, eda_simulate, emg_simulate

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      ppg = nk.ppg_simulate(duration=40, sampling_rate=500, heart_rate=75, random_state=42)

    """
    # Seed the random generator for reproducible results.
    rng = check_random_state(random_state)
    # Four independent child generators: one per distortion type applied below.
    random_state_distort = check_random_state_children(random_state, random_state_distort, n_children=4)

    # At the requested sampling rate, how long is a period at the requested
    # heart-rate and how often does that period fit into the requested
    # duration?
    period = 60 / heart_rate  # in seconds
    n_period = int(np.floor(duration / period))
    periods = np.ones(n_period) * period

    # Seconds at which waves begin.
    x_onset = np.cumsum(periods)
    x_onset -= x_onset[0]  # make sure seconds start at zero

    # Add respiratory sinus arrythmia (frequency modulation).
    periods, x_onset = _frequency_modulation(
        periods,
        x_onset,
        modulation_frequency=0.05,
        modulation_strength=frequency_modulation,
    )

    # Randomly modulate duration of waves by subtracting a random value between
    # 0 and ibi_randomness% of the wave duration (see function definition).
    x_onset = _random_x_offset(x_onset, ibi_randomness, rng)

    # Corresponding signal amplitudes.
    y_onset = rng.normal(0, 0.1, n_period)

    # Seconds at which the systolic peaks occur within the waves.
    x_sys = x_onset + rng.normal(0.175, 0.01, n_period) * periods
    # Corresponding signal amplitudes.
    y_sys = y_onset + rng.normal(1.5, 0.15, n_period)

    # Seconds at which the dicrotic notches occur within the waves.
    x_notch = x_onset + rng.normal(0.4, 0.001, n_period) * periods
    # Corresponding signal amplitudes (percentage of systolic peak height).
    y_notch = y_sys * rng.normal(0.49, 0.01, n_period)

    # Seconds at which the diastolic peaks occur within the waves.
    x_dia = x_onset + rng.normal(0.45, 0.001, n_period) * periods
    # Corresponding signal amplitudes (percentage of systolic peak height).
    y_dia = y_sys * rng.normal(0.51, 0.01, n_period)

    x_all = np.concatenate((x_onset, x_sys, x_notch, x_dia))
    x_all.sort(kind="mergesort")  # stable sort
    x_all = np.ceil(x_all * sampling_rate).astype(int)  # convert seconds to samples

    # Interleave the landmark amplitudes in onset/sys/notch/dia order per wave.
    y_all = np.zeros(n_period * 4)
    y_all[0::4] = y_onset
    y_all[1::4] = y_sys
    y_all[2::4] = y_notch
    y_all[3::4] = y_dia

    if show:
        __, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, sharex=True)
        ax0.scatter(x_all, y_all, c="r")

    # Interpolate a continuous signal between the landmarks (i.e., Cartesian
    # coordinates).
    samples = np.arange(int(np.ceil(duration * sampling_rate)))
    ppg = signal_interpolate(x_values=x_all, y_values=y_all, x_new=samples, method="akima")

    # Remove NAN (values outside interpolation range, i.e., after last sample).
    ppg[np.isnan(ppg)] = np.nanmean(ppg)

    if show:
        ax0.plot(ppg)

    # Add baseline drift.
    if drift > 0:
        drift_freq = 0.05
        # Ensure at least two full drift cycles fit into the signal duration.
        if drift_freq < (1 / duration) * 2:
            drift_freq = (1 / duration) * 2

        ppg = signal_distort(
            ppg,
            sampling_rate=sampling_rate,
            noise_amplitude=drift,
            noise_frequency=drift_freq,
            random_state=random_state_distort[0],
            silent=True,
        )

    # Add motion artifacts.
    if motion_amplitude > 0:
        motion_freq = 0.5
        ppg = signal_distort(
            ppg,
            sampling_rate=sampling_rate,
            noise_amplitude=motion_amplitude,
            noise_frequency=motion_freq,
            random_state=random_state_distort[1],
            silent=True,
        )

    # Add high frequency bursts.
    if burst_amplitude > 0:
        ppg = signal_distort(
            ppg,
            sampling_rate=sampling_rate,
            artifacts_amplitude=burst_amplitude,
            artifacts_frequency=100,
            artifacts_number=burst_number,
            random_state=random_state_distort[2],
            silent=True,
        )

    # Add powerline noise.
    if powerline_amplitude > 0:
        ppg = signal_distort(
            ppg,
            sampling_rate=sampling_rate,
            powerline_amplitude=powerline_amplitude,
            powerline_frequency=50,
            random_state=random_state_distort[3],
            silent=True,
        )

    if show:
        ax1.plot(ppg)

    return ppg
def _frequency_modulation(periods, seconds, modulation_frequency, modulation_strength):
"""modulator_frequency determines the frequency at which respiratory sinus arrhythmia occurs (in Hz).
modulator_strength must be between 0 and 1.
"""
modulation_mean = 1
# Enforce minimum inter-beat-interval of 300 milliseconds.
if (modulation_mean - modulation_strength) * periods[
0
] < 0.3: # elements in periods all have the same value at this point
print(
"Skipping frequency modulation, since the modulation_strength"
f" {modulation_strength} leads to physiologically implausible"
f" wave durations of {((modulation_mean - modulation_strength) * periods[0]) * 1000}"
f" milliseconds."
)
return periods, seconds
# Apply a very conservative Nyquist criterion.
nyquist = (1 / periods[0]) * 0.1
if modulation_frequency > nyquist:
print(f"Please choose a modulation frequency lower than {nyquist}.")
# Generate a sine with mean 1 and amplitude 0.5 * modulation_strength, that is,
# ranging from 1 - 0.5 * modulation_strength to 1 + 0.5 * modulation_strength.
# For example, at a heart rate of 100 and modulation_strenght=1, the heart rate will
# fluctuate between 150 and 50. At the default modulatiom_strenght=.2, it will
# fluctuate between 110 and 90.
modulator = (
0.5 * modulation_strength * np.sin(2 * np.pi * modulation_frequency * seconds)
+ modulation_mean
)
periods_modulated = periods * modulator
seconds_modulated = np.cumsum(periods_modulated)
seconds_modulated -= seconds_modulated[0] # make sure seconds start at zero
return periods_modulated, seconds_modulated
def _random_x_offset(x, offset_weight, rng):
"""From each wave onset xi subtract offset_weight * (xi - xi-1) where xi-1 is
the wave onset preceding xi. offset_weight must be between 0 and 1.
"""
# Sanitize offset to min 0 and max .99
offset_weight = min(offset_weight, 0.99)
offset_weight = max(offset_weight, 0)
x_diff = np.diff(x)
# Enforce minimum inter-beat-interval of 300 milliseconds.
min_x_diff = min(x_diff)
if (min_x_diff - (min_x_diff * offset_weight)) < 0.3:
print(
"Skipping random IBI modulation, since the offset_weight"
f" {offset_weight} leads to physiologically implausible wave"
f" durations of {(min_x_diff - (min_x_diff * offset_weight)) * 1000}"
f" milliseconds."
)
return x
max_offsets = offset_weight * x_diff
offsets = [rng.uniform(0, i) for i in max_offsets]
x_offset = x.copy()
x_offset[1:] -= offsets
return x_offset
def _amplitude_modulation():
    """Placeholder for amplitude modulation of the simulated PPG (not implemented)."""
    # TODO
    pass
| 11,257 | 38.780919 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/ppg/ppg_analyze.py | # -*- coding: utf-8 -*-
import pandas as pd
from .ppg_eventrelated import ppg_eventrelated
from .ppg_intervalrelated import ppg_intervalrelated
def ppg_analyze(data, sampling_rate=1000, method="auto"):
    """**Photoplethysmography (PPG) Analysis**.

    Performs PPG analysis on either epochs (event-related analysis) or on longer periods of data
    such as resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A dictionary of epochs, containing one DataFrame per epoch, usually obtained via
        :func:`.epochs_create`, or a DataFrame containing all epochs, usually obtained via
        :func:`.epochs_to_df`. Can also take a DataFrame of processed signals from a longer period
        of data, typically generated by :func:`.ppg_process` or :func:`.bio_process`. Can also
        take a dict containing sets of separate periods of data.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
        Defaults to 1000Hz.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs, or
        ``"interval-related"`` for analysis on longer periods of data. Defaults to ``"auto"`` where
        the right method will be chosen based on the mean duration of the data (``"event-related"``
        for duration under 10s).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed PPG features. If event-related analysis is conducted,
        each epoch is indicated by the ``Label`` column. See :func:`.ppg_eventrelated` and
        :func:`.ppg_intervalrelated` docstrings for details.

    Raises
    ------
    ValueError
        If ``method`` is not recognized, or if event-related analysis is requested on data
        without a ``Label`` column.

    See Also
    --------
    bio_process, ppg_process, epochs_create, ppg_eventrelated, ppg_intervalrelated
    """
    method = method.lower()

    # Event-related analysis
    if method in ["event-related", "event", "epoch"]:
        # Sanity checks: epochs must carry a "Label" column.
        if isinstance(data, dict):
            for i in data:
                colnames = data[i].columns.values
        elif isinstance(data, pd.DataFrame):
            colnames = data.columns.values

        if len([i for i in colnames if "Label" in i]) == 0:
            raise ValueError(
                "NeuroKit error: ppg_analyze(): Wrong input or method,"
                "we couldn't extract epochs features."
            )
        else:
            features = ppg_eventrelated(data)

    # Interval-related analysis
    elif method in ["interval-related", "interval", "resting-state"]:
        features = ppg_intervalrelated(data, sampling_rate=sampling_rate)

    # Auto: pick the method from the data duration (>= 10 s -> interval-related).
    elif method in ["auto"]:
        if isinstance(data, dict):
            for i in data:
                duration = len(data[i]) / sampling_rate
            if duration >= 10:
                features = ppg_intervalrelated(data, sampling_rate=sampling_rate)
            else:
                features = ppg_eventrelated(data)

        if isinstance(data, pd.DataFrame):
            if "Label" in data.columns:
                # The most common label count gives the epoch length. `.iloc[0]`
                # replaces the deprecated positional `value_counts()[0]` access
                # (removed in pandas 3.0).
                epoch_len = data["Label"].value_counts().iloc[0]
                duration = epoch_len / sampling_rate
            else:
                duration = len(data) / sampling_rate
            if duration >= 10:
                features = ppg_intervalrelated(data, sampling_rate=sampling_rate)
            else:
                features = ppg_eventrelated(data)

    else:
        # Bug fix: an unrecognized method previously fell through and crashed
        # with an opaque UnboundLocalError on `features`.
        raise ValueError(
            "NeuroKit error: ppg_analyze(): `method` must be one of"
            " 'event-related', 'interval-related', or 'auto'."
        )

    return features
| 4,193 | 34.542373 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_plot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def emg_plot(emg_signals, sampling_rate=None, static=True):
    """**EMG Graph**

    Visualize electromyography (EMG) data.

    Parameters
    ----------
    emg_signals : DataFrame
        DataFrame obtained from ``emg_process()``.
    sampling_rate : int
        The sampling frequency of the EMG (in Hz, i.e., samples/second). Needs to be supplied if the
        data should be plotted over time in seconds. Otherwise the data is plotted over samples.
        Defaults to ``None``.
    static : bool
        If True, a static plot will be generated with matplotlib.
        If False, an interactive plot will be generated with plotly.
        Defaults to True.

    Returns
    -------
    The matplotlib (or plotly) figure. It can also be retrieved and saved via:

    .. code-block:: console

        # To be run after emg_plot()
        fig = plt.gcf()
        fig.savefig("myfig.png")

    Raises
    ------
    ValueError
        If ``emg_signals`` is not the DataFrame returned by ``emg_process()``.

    See Also
    --------
    emg_process

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate data
      emg = nk.emg_simulate(duration=10, sampling_rate=1000, burst_number=3)

      # Process signal
      emg_signals, _ = nk.emg_process(emg, sampling_rate=1000)

      # Plot
      @savefig p_emg_plot.png scale=100%
      nk.emg_plot(emg_signals)
      @suppress
      plt.close()

    """
    # Bug fix: sanity-check the input BEFORE indexing columns, so invalid input
    # raises the intended ValueError instead of a TypeError/KeyError.
    if not isinstance(emg_signals, pd.DataFrame):
        raise ValueError(
            "NeuroKit error: The `emg_signals` argument must"
            " be the DataFrame returned by `emg_process()`."
        )

    # Mark onsets, offsets, activity
    onsets = np.where(emg_signals["EMG_Onsets"] == 1)[0]
    offsets = np.where(emg_signals["EMG_Offsets"] == 1)[0]

    # Determine what to display on the x-axis (seconds if the sampling rate is
    # known, raw sample indices otherwise).
    if sampling_rate is not None:
        x_axis = np.linspace(0, emg_signals.shape[0] / sampling_rate, emg_signals.shape[0])
    else:
        x_axis = np.arange(0, emg_signals.shape[0])

    if static is True:
        return _emg_plot_static(emg_signals, x_axis, onsets, offsets, sampling_rate)
    else:
        return _emg_plot_interactive(emg_signals, x_axis, onsets, offsets, sampling_rate)
# =============================================================================
# Internals
# =============================================================================
def _emg_plot_activity(emg_signals, onsets, offsets):
activity_signal = pd.Series(np.full(len(emg_signals), np.nan))
activity_signal[onsets] = emg_signals["EMG_Amplitude"][onsets].values
activity_signal[offsets] = emg_signals["EMG_Amplitude"][offsets].values
activity_signal = activity_signal.fillna(method="backfill")
if np.any(activity_signal.isna()):
index = np.min(np.where(activity_signal.isna())) - 1
value_to_fill = activity_signal[index]
activity_signal = activity_signal.fillna(value_to_fill)
return activity_signal
def _emg_plot_static(emg_signals, x_axis, onsets, offsets, sampling_rate):
    """Render the static (matplotlib) two-panel figure for `emg_plot()`.

    Top panel: raw vs. cleaned signal. Bottom panel: amplitude envelope with
    shaded activity regions and onset/offset markers. Returns the figure
    handle (the figure is closed so it is not displayed twice).

    Parameters
    ----------
    emg_signals : pd.DataFrame
        Processed EMG signals (``EMG_Raw``, ``EMG_Clean``, ``EMG_Amplitude``).
    x_axis : array
        X values, in seconds if ``sampling_rate`` is given, else sample indices.
    onsets, offsets : array
        Sample indices of activity onsets/offsets.
    sampling_rate : int or None
        Sampling frequency (Hz); ``None`` means the x-axis is in samples.
    """
    # Prepare figure.
    fig, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, sharex=True)
    if sampling_rate is not None:
        ax1.set_xlabel("Time (seconds)")
    elif sampling_rate is None:
        ax1.set_xlabel("Samples")
    fig.suptitle("Electromyography (EMG)", fontweight="bold")
    plt.tight_layout(h_pad=0.2)
    # Plot cleaned and raw EMG.
    ax0.set_title("Raw and Cleaned Signal")
    ax0.plot(x_axis, emg_signals["EMG_Raw"], color="#B0BEC5", label="Raw", zorder=1)
    ax0.plot(
        x_axis, emg_signals["EMG_Clean"], color="#FFC107", label="Cleaned", zorder=1, linewidth=1.5
    )
    ax0.legend(loc="upper right")
    # Plot Amplitude.
    ax1.set_title("Muscle Activation")
    ax1.plot(
        x_axis, emg_signals["EMG_Amplitude"], color="#FF9800", label="Amplitude", linewidth=1.5
    )
    # Shade activity regions: fill between the amplitude and the step-like
    # onset/offset baseline wherever the amplitude exceeds it.
    activity_signal = _emg_plot_activity(emg_signals, onsets, offsets)
    ax1.fill_between(
        x_axis,
        emg_signals["EMG_Amplitude"],
        activity_signal,
        where=emg_signals["EMG_Amplitude"] > activity_signal,
        color="#f7c568",
        alpha=0.5,
        label=None,
    )
    # Mark onsets and offsets (indices here index into x_axis, so the markers
    # land correctly regardless of the x-axis unit).
    ax1.scatter(
        x_axis[onsets], emg_signals["EMG_Amplitude"][onsets], color="#f03e65", label=None, zorder=3
    )
    ax1.scatter(
        x_axis[offsets],
        emg_signals["EMG_Amplitude"][offsets],
        color="#f03e65",
        label=None,
        zorder=3,
    )
    # axvline takes x-axis coordinates directly, so sample indices must be
    # converted to seconds when the x-axis is in seconds.
    if sampling_rate is not None:
        onsets = onsets / sampling_rate
        offsets = offsets / sampling_rate
    for i, j in zip(list(onsets), list(offsets)):
        ax1.axvline(i, color="#4a4a4a", linestyle="--", label=None, zorder=2)
        ax1.axvline(j, color="#4a4a4a", linestyle="--", label=None, zorder=2)
    ax1.legend(loc="upper right")
    # Close so interactive backends don't display the figure twice; the caller
    # still holds the returned handle.
    plt.close()
    return fig
def _emg_plot_interactive(emg_signals, x_axis, onsets, offsets, sampling_rate):
    """Render the interactive (plotly) two-row figure for `emg_plot()`.

    Row 1: raw vs. cleaned signal. Row 2: amplitude envelope with
    onset/offset markers and dashed vertical lines bounding each activity
    period. Returns the plotly figure.
    """
    try:
        import plotly.graph_objects as go
        from plotly.subplots import make_subplots
    except ImportError:
        raise ImportError(
            "NeuroKit error: emg_plot(): the 'plotly' "
            "module is required for this feature."
            "Please install it first (`pip install plotly`)."
        )

    fig = make_subplots(rows=2, cols=1, shared_xaxes=True)
    fig.update_layout(title="Electromyography (EMG)", font=dict(size=18), height=600)

    def _add_line(x, y, name, color, row):
        # Append one "lines" trace to the requested row.
        fig.add_trace(
            go.Scatter(x=x, y=y, mode="lines", name=name, line=dict(color=color)),
            row=row,
            col=1,
        )

    def _add_markers(x, y, name, color, row):
        # Append one "markers" trace to the requested row.
        fig.add_trace(
            go.Scatter(x=x, y=y, mode="markers", name=name, marker=dict(color=color, size=10)),
            row=row,
            col=1,
        )

    # Raw and cleaned EMG on the top row.
    _add_line(x_axis, emg_signals["EMG_Raw"], "Raw", "#B0BEC5", 1)
    _add_line(x_axis, emg_signals["EMG_Clean"], "Cleaned", "#FFC107", 1)

    # Amplitude with onset/offset markers on the bottom row.
    _add_line(x_axis, emg_signals["EMG_Amplitude"], "Amplitude", "#FF9800", 2)
    _add_markers(x_axis[onsets], emg_signals["EMG_Amplitude"][onsets], "Onsets", "#f03e65", 2)
    _add_markers(x_axis[offsets], emg_signals["EMG_Amplitude"][offsets], "Offsets", "#f03e65", 2)

    # Shapes take x-axis coordinates, so sample indices are converted to
    # seconds when the x-axis is in seconds.
    if sampling_rate is not None:
        onsets = onsets / sampling_rate
        offsets = offsets / sampling_rate
        fig.update_xaxes(title_text="Time (seconds)", row=2, col=1)
    elif sampling_rate is None:
        fig.update_xaxes(title_text="Samples", row=2, col=1)

    # Dashed vertical lines bounding each activity period.
    for start, stop in zip(list(onsets), list(offsets)):
        for position in (start, stop):
            fig.add_shape(
                type="line",
                x0=position,
                y0=0,
                x1=position,
                y1=1,
                line=dict(color="#4a4a4a", width=2, dash="dash"),
                row=2,
                col=1,
            )

    fig.update_yaxes(title_text="Amplitude", row=2, col=1)
    return fig
| 7,786 | 27.947955 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_analyze.py | # -*- coding: utf-8 -*-
import pandas as pd
from .emg_eventrelated import emg_eventrelated
from .emg_intervalrelated import emg_intervalrelated
def emg_analyze(data, sampling_rate=1000, method="auto"):
    """**EMG Analysis**

    Performs EMG analysis on either epochs (event-related analysis) or on longer periods of data
    such as resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A dictionary of epochs, containing one DataFrame per epoch, usually obtained via
        ``epochs_create()``, or a DataFrame containing all epochs, usually obtained via
        ``epochs_to_df()``. Can also take a DataFrame of processed signals from a longer period of
        data, typically generated by ``emg_process()`` or ``bio_process()``. Can also take a dict
        containing sets of separate periods of data.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Defaults to 1000Hz.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs, or
        ``"interval-related"`` for analysis on longer periods of data. Defaults to ``auto`` where
        the right method will be chosen based on the mean duration of the data
        (``"event-related"`` for duration under 10s).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EMG features. If event-related analysis is conducted,
        each epoch is indicated by the `Label` column. See :func:`emg_eventrelated` and
        :func:`emg_intervalrelated` docstrings for details.

    Raises
    ------
    ValueError
        If the input lacks the epoch information required for event-related analysis, if the input
        is neither a dict of epochs nor a DataFrame, or if ``method`` is not recognized.

    See Also
    --------
    .bio_process, emg_process, .epochs_create, emg_eventrelated, emg_intervalrelated

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import pandas as pd

      # Example with simulated data
      emg = nk.emg_simulate(duration=20, sampling_rate=1000, burst_number=3)
      emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
      epochs = nk.epochs_create(emg_signals, events=[3000, 6000, 9000], sampling_rate=1000,
                                epochs_start=-0.1, epochs_end=1.9)

      # Event-related analysis
      analyze_epochs = nk.emg_analyze(epochs, method="event-related")
      analyze_epochs

      # Interval-related analysis
      analyze_df = nk.emg_analyze(emg_signals, method="interval-related")
      analyze_df

    """
    method = method.lower()

    # Event-related analysis
    if method in ["event-related", "event", "epoch"]:
        # Sanity checks: epochs must carry a `Label` column.
        colnames = []
        if isinstance(data, dict):
            for i in data:
                colnames = data[i].columns.values
        elif isinstance(data, pd.DataFrame):
            colnames = data.columns.values
        else:
            raise ValueError(
                "NeuroKit error: emg_analyze(): Wrong input: expected a dict of epochs or a DataFrame."
            )
        if len([i for i in colnames if "Label" in i]) == 0:
            # Fixed message (previously read "couldn't extract extract epochs features").
            raise ValueError(
                "NeuroKit error: emg_analyze(): Wrong input or method, we couldn't extract epochs features."
            )
        features = emg_eventrelated(data)

    # Interval-related analysis
    elif method in ["interval-related", "interval", "resting-state"]:
        features = emg_intervalrelated(data)

    # Auto: dispatch based on the duration of the data (>= 10 s -> interval-related).
    elif method in ["auto"]:
        if isinstance(data, dict):
            for i in data:
                duration = len(data[i]) / sampling_rate
            if duration >= 10:
                features = emg_intervalrelated(data)
            else:
                features = emg_eventrelated(data)
        elif isinstance(data, pd.DataFrame):
            if "Label" in data.columns:
                # Epoched data: use the duration of one epoch. `.iloc[0]`
                # replaces the deprecated positional `value_counts()[0]`.
                epoch_len = data["Label"].value_counts().iloc[0]
                duration = epoch_len / sampling_rate
            else:
                duration = len(data) / sampling_rate
            if duration >= 10:
                features = emg_intervalrelated(data)
            else:
                features = emg_eventrelated(data)
        else:
            raise ValueError(
                "NeuroKit error: emg_analyze(): Wrong input: expected a dict of epochs or a DataFrame."
            )

    else:
        # Previously an unknown method fell through to an UnboundLocalError.
        raise ValueError(
            "NeuroKit error: emg_analyze(): 'method' should be one of 'event-related',"
            " 'interval-related' or 'auto'."
        )

    return features
| 3,915 | 35.943396 | 127 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_eventrelated.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
from ..misc import NeuroKitWarning
def emg_eventrelated(epochs, silent=False):
    """**Event-related EMG Analysis**

    Performs event-related EMG analysis on epochs.

    Parameters
    ----------
    epochs : Union[dict, pd.DataFrame]
        A dict containing one DataFrame per event/trial, usually obtained via ``epochs_create()``,
        or a DataFrame containing all epochs, usually obtained via ``epochs_to_df()``.
    silent : bool
        If ``True``, silence possible warnings.

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EMG features for each epoch, with each epoch indicated
        by the `Label` column (if not present, by the `Index` column). The analyzed features consist
        of the following:

        * ``"EMG_Activation*``: indication of whether there is muscular activation following
          the onset of the event (1 if present, 0 if absent) and if so, its corresponding
          amplitude features and the number of activations in each epoch. If there is no
          activation, nans are displayed for the below features.
        * ``"EMG_Amplitude_Mean*``: the mean amplitude of the activity.
        * ``"EMG_Amplitude_Max*``: the maximum amplitude of the activity.
        * ``"EMG_Amplitude_SD*``: the standard deviation of the activity amplitude.
        * ``"EMG_Amplitude_Max_Time*``: the time of maximum amplitude.
        * ``"EMG_Bursts*``: the number of activations, or bursts of activity, within each epoch.

    See Also
    --------
    emg_simulate, emg_process, .events_find, .epochs_create

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Example with simulated data
      emg = nk.emg_simulate(duration=20, sampling_rate=1000, burst_number=3)
      emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
      epochs = nk.epochs_create(emg_signals, events=[3000, 6000, 9000], sampling_rate=1000,
                                epochs_start=-0.1,epochs_end=1.9)
      nk.emg_eventrelated(epochs)

    """
    # Sanity checks
    epochs = _eventrelated_sanitizeinput(epochs, what="emg", silent=silent)

    # Extract features and build dataframe
    data = {}  # Initialize an empty dict
    for i in epochs.keys():
        data[i] = {}  # Initialize an empty dict for the current epoch

        # Activation following event: only onsets occurring after time 0
        # (i.e., post-event samples) count as an activation.
        if "EMG_Onsets" not in epochs[i]:
            warn(
                "Input does not have an `EMG_Onsets` column." " Unable to process EMG features.",
                category=NeuroKitWarning,
            )
            data[i]["EMG_Activation"] = 0
        elif np.any(epochs[i]["EMG_Onsets"][epochs[i].index > 0] != 0):
            data[i]["EMG_Activation"] = 1
        else:
            data[i]["EMG_Activation"] = 0

        # Analyze features based on activation: amplitude features are only
        # meaningful when an activation was detected; otherwise fill with NaN.
        if data[i]["EMG_Activation"] == 1:
            data[i] = _emg_eventrelated_features(epochs[i], data[i])
        else:
            data[i]["EMG_Amplitude_Mean"] = np.nan
            data[i]["EMG_Amplitude_Max"] = np.nan
            data[i]["EMG_Amplitude_SD"] = np.nan
            data[i]["EMG_Amplitude_Max_Time"] = np.nan
            data[i]["EMG_Bursts"] = np.nan

        # Fill with more info (condition labels, etc.) from the epoch itself.
        data[i] = _eventrelated_addinfo(epochs[i], data[i])

    df = _eventrelated_sanitizeoutput(data)

    return df
# =============================================================================
# Internals
# =============================================================================
def _emg_eventrelated_features(epoch, output={}):
# Sanitize input
if "EMG_Activity" not in epoch or "EMG_Amplitude" not in epoch:
warn(
"Input does not have an `EMG_Activity` column or `EMG_Amplitude` column."
" Will skip computation of EMG amplitudes.",
category=NeuroKitWarning,
)
return output
# Peak amplitude and Time of peak
activations = len(np.where(epoch["EMG_Onsets"][epoch.index > 0] == 1)[0])
activated_signal = np.where(epoch["EMG_Activity"][epoch.index > 0] == 1)
mean = np.array(epoch["EMG_Amplitude"][epoch.index > 0].iloc[activated_signal]).mean()
maximum = np.array(epoch["EMG_Amplitude"][epoch.index > 0].iloc[activated_signal]).max()
index_time = np.where(epoch["EMG_Amplitude"][epoch.index > 0] == maximum)[0]
time = np.array(epoch["EMG_Amplitude"][epoch.index > 0].index[index_time])[0]
output["EMG_Amplitude_Mean"] = mean
output["EMG_Amplitude_Max"] = maximum
output["EMG_Amplitude_SD"] = np.std(
epoch["EMG_Amplitude"][epoch.index > 0].iloc[activated_signal]
)
output["EMG_Amplitude_Max_Time"] = time
output["EMG_Bursts"] = activations
return output
| 4,995 | 36.56391 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_amplitude.py | # -*- coding: utf-8 -*-
import numpy as np
from ..signal import signal_filter
def emg_amplitude(emg_cleaned):
    """**Compute electromyography (EMG) amplitude**

    Compute the amplitude (linear envelope) of a cleaned EMG signal.

    Parameters
    ----------
    emg_cleaned : Union[list, np.array, pd.Series]
        The cleaned electromyography channel as returned by ``emg_clean()``.

    Returns
    -------
    array
        A vector containing the electromyography amplitude.

    See Also
    --------
    emg_clean, emg_rate, emg_process, emg_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import pandas as pd

      emg = nk.emg_simulate(duration=10, sampling_rate=1000, burst_number=3)
      cleaned = nk.emg_clean(emg, sampling_rate=1000)
      amplitude = nk.emg_amplitude(cleaned)
      @savefig p_emg_amplitude1.png scale=100%
      fig = pd.DataFrame({"EMG": emg, "Amplitude": amplitude}).plot(subplots=True)
      @suppress
      plt.close()

    """
    # Sharpen onsets with the Teager-Kaiser Energy operator, then smooth the
    # rectified result into a linear envelope.
    return _emg_amplitude_envelope(_emg_amplitude_tkeo(emg_cleaned))
# =============================================================================
# Taeger-Kaiser Energy Operator
# =============================================================================
def _emg_amplitude_tkeo(emg_cleaned):
"""Calculates the Teager–Kaiser Energy operator to improve onset detection, described by Marcos Duarte at
https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb.
Parameters
----------
emg_cleaned : Union[list, np.array, pd.Series]
The cleaned electromyography channel as returned by `emg_clean()`.
Returns
-------
tkeo : array
The emg signal processed by the Teager–Kaiser Energy operator.
References
----------
- BMCLab: https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
- Li, X., Zhou, P., & Aruin, A. S. (2007). Teager–Kaiser energy operation of surface EMG improves
muscle activity onset detection. Annals of biomedical engineering, 35(9), 1532-1538.
"""
tkeo = emg_cleaned.copy()
# Teager–Kaiser Energy operator
tkeo[1:-1] = emg_cleaned[1:-1] * emg_cleaned[1:-1] - emg_cleaned[:-2] * emg_cleaned[2:]
# Correct the data in the extremities
tkeo[0], tkeo[-1] = tkeo[1], tkeo[-2]
return tkeo
# =============================================================================
# Linear Envelope
# =============================================================================
def _emg_amplitude_envelope(
    emg_cleaned, sampling_rate=1000, lowcut=10, highcut=400, envelope_filter=8
):
    """Compute the linear envelope of an EMG signal.

    Band-pass filters the signal, full-wave rectifies it, then low-pass
    filters the rectified signal (2nd-order Butterworth, zero lag), as
    described by Marcos Duarte at
    <https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb>.

    Parameters
    ----------
    emg_cleaned : Union[list, np.array, pd.Series]
        The cleaned electromyography channel as returned by `emg_clean()`.
    sampling_rate : int
        The sampling frequency of `emg_signal` (in Hz, i.e., samples/second).
    lowcut : float
        Low-cut frequency for the band-pass filter (in Hz). Defaults to 10Hz.
    highcut : float
        High-cut frequency for the band-pass filter (in Hz). Defaults to 400Hz.
    envelope_filter : float
        Cutoff frequency for the envelope's low-pass filter (in Hz). Defaults to 8Hz.

    Returns
    -------
    array
        The linear envelope of the emg signal.

    References
    ----------
    - BMCLab: https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb

    """
    # Keep only the EMG frequency band.
    bandpassed = signal_filter(
        emg_cleaned,
        sampling_rate=sampling_rate,
        lowcut=lowcut,
        highcut=highcut,
        method="butterworth",
        order=2,
    )
    # Full-wave rectify, then low-pass to obtain the envelope.
    rectified = np.abs(bandpassed)
    return signal_filter(
        rectified,
        sampling_rate=sampling_rate,
        lowcut=None,
        highcut=envelope_filter,
        method="butterworth",
        order=2,
    )
| 4,219 | 29.142857 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_methods.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc.report import get_kwargs
from .emg_activation import emg_activation
def emg_methods(
    sampling_rate=1000,
    method_cleaning="biosppy",
    method_activation="threshold",
    **kwargs,
):
    """**EMG Preprocessing Methods**

    This function analyzes and specifies the methods used in the preprocessing, and creates a
    textual description of the methods used. It is used by :func:`emg_process()` to dispatch the
    correct methods to each subroutine of the pipeline and to create a
    preprocessing report.

    Parameters
    ----------
    sampling_rate : int
        The sampling frequency of the raw EMG signal (in Hz, i.e., samples/second).
    method_cleaning : str
        The method used for cleaning the raw EMG signal. Can be one of ``"biosppy"`` or ``"none"``.
        Defaults to ``"biosppy"``. If ``"none"`` is passed, the raw signal will be used without
        any cleaning.
    method_activation: str
        The method used for locating EMG activity. Defaults to ``"threshold"``.
        For more information, see the ``"method"`` argument
        of :func:`.emg_activation`.
    **kwargs
        Other arguments to be passed to :func:`.emg_activation`.

    Returns
    -------
    report_info : dict
        A dictionary containing the keyword arguments passed to the cleaning and activation
        functions, text describing the methods, and the corresponding references.

    See Also
    --------
    emg_process, .emg_activation
    """
    # Sanitize inputs
    method_cleaning = str(method_cleaning).lower()
    method_activation = str(method_activation).lower()

    # Create dictionary with all inputs
    report_info = {
        "sampling_rate": sampling_rate,
        "method_cleaning": method_cleaning,
        "method_activation": method_activation,
        **kwargs,
    }

    # Get arguments to be passed to activation function
    kwargs_activation, report_info = get_kwargs(report_info, emg_activation)

    # Save keyword arguments in dictionary
    report_info["kwargs_activation"] = kwargs_activation

    # Initialize refs list with NeuroKit2 reference
    refs = [
        """Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H.,
Schölzel, C., & Chen, S. A. (2021). NeuroKit2: A Python toolbox for neurophysiological signal processing.
Behavior Research Methods, 53(4), 1689–1696. https://doi.org/10.3758/s13428-020-01516-y
    """
    ]

    # 1. Cleaning
    # ------------
    # If no cleaning
    report_info["text_cleaning"] = f"The raw signal, sampled at {sampling_rate} Hz,"
    if method_cleaning in ["none"]:
        report_info["text_cleaning"] += " was directly used without any cleaning."
    else:
        report_info["text_cleaning"] += (
            " was cleaned using the " + method_cleaning + " method."
        )

    # 2. Activation
    # -------------
    # NOTE(review): the branches below read report_info["threshold"]; this assumes
    # `get_kwargs` injects a `threshold` entry (e.g. from emg_activation's
    # defaults) even when the caller did not pass one — verify against get_kwargs.
    report_info["text_activation"] = (
        "EMG activity was detected using the " + method_activation + " method. "
    )
    if method_activation in ["silva"]:
        if str(report_info["threshold"]) == "default":
            threshold_str = "0.05"
        else:
            threshold_str = str(report_info["threshold"])
        report_info["text_activation"] += f"""The threshold was {threshold_str}. """
        refs.append(
            """Silva H, Scherer R, Sousa J, Londral A , "Towards improving the ssability of
            electromyographic interfacess", Journal of Oral Rehabilitation, pp. 1-2, 2012."""
        )
    if method_activation in ["mixture"]:
        report_info[
            "text_activation"
        ] += """A Gaussian mixture model was used to discriminate between activity and baseline. """
        if str(report_info["threshold"]) == "default":
            threshold_str = "0.33"
        else:
            threshold_str = str(report_info["threshold"])
        report_info[
            "text_activation"
        ] += f"""The minimum probability required to
        be considered as activated was {threshold_str}. """
    elif method_activation in ["threshold"]:
        report_info[
            "text_activation"
        ] += """The signal was considered as activated when the amplitude exceeded a threshold. """
        if str(report_info["threshold"]) == "default":
            threshold_str = "one tenth of the standard deviation of emg_amplitude"
        else:
            threshold_str = str(report_info["threshold"])
        report_info[
            "text_activation"
        ] += f"""The minimum amplitude to detect as onset was set to {threshold_str}."""
    elif method_activation in ["biosppy"]:
        if str(report_info["threshold"]) == "default":
            threshold_str = "1.2 times of the mean of the absolute of the smoothed, full-wave-rectified signal"
        else:
            threshold_str = str(report_info["threshold"])
        report_info[
            "text_activation"
        ] += f"""The threshold was set to {threshold_str}."""

    # 3. References
    # -------------
    report_info["references"] = list(np.unique(refs))
    return report_info
| 5,092 | 36.448529 | 111 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_activation.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..events import events_find
from ..misc import as_vector
from ..signal import (signal_binarize, signal_changepoints, signal_formatpeaks,
signal_smooth)
def emg_activation(
    emg_amplitude=None,
    emg_cleaned=None,
    sampling_rate=1000,
    method="threshold",
    threshold="default",
    duration_min="default",
    size=None,
    threshold_size=None,
    **kwargs,
):
    """**Locate EMG Activity**

    Detects onset in EMG signal based on the amplitude threshold.

    Parameters
    ----------
    emg_amplitude : array
        At least one EMG-related signal. Either the amplitude of the EMG signal, obtained from
        ``emg_amplitude()`` for methods like ``"threshold"`` or ``"mixture"``), and / or the
        cleaned EMG signal (for methods like ``"pelt"``, ``"biosppy"`` or ``"silva"``).
    emg_cleaned : array
        At least one EMG-related signal. Either the amplitude of the EMG signal, obtained from
        ``emg_amplitude()`` for methods like ``"threshold"`` or ``"mixture"``), and / or the
        cleaned EMG signal (for methods like ``"pelt"``, ``"biosppy"`` or ``"silva"``).
    sampling_rate : int
        The sampling frequency of ``emg_signal`` (in Hz, i.e., samples/second).
    method : str
        The algorithm used to discriminate between activity and baseline. Can be one of
        ``"mixture"`` (default) or ``"threshold"``. If ``"mixture"``, will use a Gaussian Mixture
        Model to categorize between the two states. If ``"threshold"``, will consider as activated
        all points which amplitude is superior to the threshold. Can also be ``"pelt"`` or
        ``"biosppy"`` or ``"silva"``.
    threshold : str
        If ``method`` is ``"mixture"``, then it corresponds to the minimum probability required to
        be considered as activated (default to 0.33). If ``method`` is ``"threshold"``, then it
        corresponds to the minimum amplitude to detect as onset i.e., defaults to one tenth of the
        standard deviation of ``emg_amplitude``. If ``method`` is ``"silva"``, defaults to 0.05. If
        ``method`` is ``"biosppy"``, defaults to 1.2 times of the mean of the absolute of the
        smoothed, full-wave-rectified signal. If ``method`` is ``"pelt"``, threshold defaults to
        ``None`` as changepoints are used as a basis for detection.
    duration_min : float
        The minimum duration of a period of activity or non-activity in seconds.
        If ``default``, will be set to 0.05 (50 ms).
    size: float or int
        Detection window size (seconds). Applicable only if ``method`` is ``"biosppy"`` or
        ``"silva"``. If ``None``, defaults to 0.05 for ``"biosppy"`` and 20 for ``"silva"``.
    threshold_size : int
        Window size for calculation of the adaptive threshold. Must be bigger than the detection
        window size. Applicable only if ``method`` is ``"silva``". If ``None``, defaults to 22.
    kwargs : optional
        Other arguments.

    Returns
    -------
    info : dict
        A dictionary containing additional information, in this case the samples at which the
        onsets, offsets, and periods of activations of the EMG signal occur, accessible with the
        key ``"EMG_Onsets"``, ``"EMG_Offsets"``, and ``"EMG_Activity"`` respectively.
    activity_signal : DataFrame
        A DataFrame of same length as the input signal in which occurences of onsets, offsets, and
        activity (above the threshold) of the EMG signal are marked as "1" in lists of zeros with
        the same length as ``emg_amplitude``. Accessible with the keys ``"EMG_Onsets"``,
        ``"EMG_Offsets"``, and ``"EMG_Activity"`` respectively.

    See Also
    --------
    emg_simulate, emg_clean, emg_amplitude, emg_process, emg_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate signal and obtain amplitude
      emg = nk.emg_simulate(duration=10, burst_number=3)
      emg_cleaned = nk.emg_clean(emg)
      emg_amplitude = nk.emg_amplitude(emg_cleaned)

    * **Example 1:** Threshold method

    .. ipython:: python

      activity, info = nk.emg_activation(emg_amplitude=emg_amplitude, method="threshold")
      @savefig p_emg_activation1.png scale=100%
      nk.events_plot([info["EMG_Offsets"], info["EMG_Onsets"]], emg_cleaned)
      @suppress
      plt.close()

    * **Example 2:** Pelt method

    .. ipython:: python

      activity, info = nk.emg_activation(emg_cleaned=emg_cleaned, method="pelt")
      @savefig p_emg_activation2.png scale=100%
      nk.events_plot([info["EMG_Offsets"], info["EMG_Onsets"]], emg_cleaned)
      @suppress
      plt.close()

    * **Example 3:** Biosppy method

    .. ipython:: python

      activity, info = nk.emg_activation(emg_cleaned=emg_cleaned, method="biosppy")
      @savefig p_emg_activation3.png scale=100%
      nk.events_plot([info["EMG_Offsets"], info["EMG_Onsets"]], emg_cleaned)
      @suppress
      plt.close()

    * **Example 4:** Silva method

    .. ipython:: python

      activity, info = nk.emg_activation(emg_cleaned=emg_cleaned, method="silva")
      @savefig p_emg_activation4.png scale=100%
      nk.events_plot([info["EMG_Offsets"], info["EMG_Onsets"]], emg_cleaned)
      @suppress
      plt.close()

    References
    ----------
    * Silva H, Scherer R, Sousa J, Londral A , "Towards improving the ssability of
      electromyographic interfacess", Journal of Oral Rehabilitation, pp. 1-2, 2012.

    """
    # Sanity checks.
    if emg_amplitude is not None:
        emg_amplitude = as_vector(emg_amplitude)
    if emg_cleaned is not None:
        emg_cleaned = as_vector(emg_cleaned)
        # Fall back to the cleaned signal when no amplitude is provided.
        if emg_amplitude is None:
            emg_amplitude = as_vector(emg_cleaned)
    # Convert the minimum duration from seconds to samples.
    if duration_min == "default":
        duration_min = int(0.05 * sampling_rate)

    # Find offsets and onsets: dispatch to the selected detection method.
    method = method.lower()  # remove capitalised letters
    if method == "threshold":
        if emg_amplitude is None:
            raise ValueError(
                "NeuroKit error: emg_activation(): 'threshold' method needs 'emg_amplitude' signal to be passed."
            )
        activity = _emg_activation_threshold(emg_amplitude, threshold=threshold)
    elif method == "mixture":
        if emg_amplitude is None:
            raise ValueError(
                "NeuroKit error: emg_activation(): 'mixture' method needs 'emg_amplitude' signal to be passed."
            )
        activity = _emg_activation_mixture(emg_amplitude, threshold=threshold)
    elif method == "pelt":
        if emg_cleaned is None:
            raise ValueError(
                "NeuroKit error: emg_activation(): 'pelt' method needs 'emg_cleaned' (cleaned or raw EMG) signal to "
                "be passed."
            )
        activity = _emg_activation_pelt(emg_cleaned, duration_min=duration_min, **kwargs)
    elif method == "biosppy":
        if emg_cleaned is None:
            raise ValueError(
                "NeuroKit error: emg_activation(): 'biosppy' method needs 'emg_cleaned' (cleaned EMG) "
                "signal to be passed."
            )
        if size is None:
            size = 0.05
        activity = _emg_activation_biosppy(
            emg_cleaned, sampling_rate=sampling_rate, size=size, threshold=threshold
        )
    elif method == "silva":
        if emg_cleaned is None:
            raise ValueError(
                "NeuroKit error: emg_activation(): 'silva' method needs 'emg_cleaned' (cleaned EMG) "
                "signal to be passed."
            )
        if size is None:
            size = 20
        if threshold_size is None:
            threshold_size = 22
        activity = _emg_activation_silva(
            emg_cleaned, size=size, threshold=threshold, threshold_size=threshold_size
        )
    else:
        # NOTE(review): this message omits the supported 'silva' option.
        raise ValueError(
            "NeuroKit error: emg_activation(): 'method' should be one of 'mixture', 'threshold', 'pelt' or 'biosppy'."
        )

    # Sanitize activity: derive paired onsets/offsets and activated sample indices.
    info = _emg_activation_activations(activity, duration_min=duration_min)

    # Prepare Output: one binary marker column per event type, full signal length.
    df_activity = signal_formatpeaks(
        {"EMG_Activity": info["EMG_Activity"]},
        desired_length=len(emg_amplitude),
        peak_indices=info["EMG_Activity"],
    )
    df_onsets = signal_formatpeaks(
        {"EMG_Onsets": info["EMG_Onsets"]},
        desired_length=len(emg_amplitude),
        peak_indices=info["EMG_Onsets"],
    )
    df_offsets = signal_formatpeaks(
        {"EMG_Offsets": info["EMG_Offsets"]},
        desired_length=len(emg_amplitude),
        peak_indices=info["EMG_Offsets"],
    )

    # Modify output produced by signal_formatpeaks.
    # NOTE(review): this loop compares `df.index[x]` with `df.index.get_loc(x)`
    # (label vs. position) and uses chained assignment (`df[col][x] = ...`),
    # which pandas may not propagate on copies — verify intent and behavior
    # against signal_formatpeaks' output before refactoring.
    for x in range(len(emg_amplitude)):
        if df_activity["EMG_Activity"][x] != 0:
            if df_activity.index[x] == df_activity.index.get_loc(x):
                df_activity["EMG_Activity"][x] = 1
            else:
                df_activity["EMG_Activity"][x] = 0
        if df_offsets["EMG_Offsets"][x] != 0:
            if df_offsets.index[x] == df_offsets.index.get_loc(x):
                df_offsets["EMG_Offsets"][x] = 1
            else:
                df_offsets["EMG_Offsets"][x] = 0

    activity_signal = pd.concat([df_activity, df_onsets, df_offsets], axis=1)

    return activity_signal, info
# =============================================================================
# Methods
# =============================================================================
def _emg_activation_threshold(emg_amplitude, threshold="default"):
    """Binarize the amplitude against a fixed threshold."""
    # Default: one tenth of the amplitude's standard deviation.
    if threshold == "default":
        threshold = np.std(emg_amplitude) * (1 / 10)
    # A threshold above the signal maximum would flag nothing as active.
    if threshold > np.max(emg_amplitude):
        raise ValueError(
            "NeuroKit error: emg_activation(): the threshold specified exceeds the maximum of the signal"
            "amplitude."
        )
    return signal_binarize(emg_amplitude, method="threshold", threshold=threshold)
def _emg_activation_mixture(emg_amplitude, threshold="default"):
    """Binarize the amplitude with a Gaussian mixture model."""
    # Default: minimum probability of belonging to the "active" component.
    if threshold == "default":
        threshold = 0.33
    return signal_binarize(emg_amplitude, method="mixture", threshold=threshold)
def _emg_activation_pelt(emg_cleaned, threshold="default", duration_min=0.05, **kwargs):
    """Detect activity via changepoints in signal variance (PELT).

    Changepoints alternate between baseline and activity; segments shorter
    than ``duration_min`` are discarded. ``duration_min`` is compared against
    segment lengths in samples here — the caller (`emg_activation`) converts
    seconds to samples before dispatching.
    """
    # `threshold` is unused by this method; changepoints drive the detection.
    if threshold == "default":
        threshold = None

    # Get changepoints
    changepoints = signal_changepoints(emg_cleaned, change="var", show=False, **kwargs)

    # Ensure the segmentation starts at sample 0.
    if changepoints[0] != 0:
        changepoints = np.append(0, changepoints)

    # Drop changepoints that close a segment shorter than duration_min.
    lengths = np.append(0, np.diff(changepoints))
    changepoints = changepoints[1:][lengths[1:] > duration_min]

    # Re-add the first point (it may have been dropped by the filter above).
    if changepoints[0] != 0:
        changepoints = np.append(0, changepoints)

    # Alternate 0/1 at each changepoint and forward-fill in between.
    # `.ffill()` replaces `fillna(method="ffill")`, deprecated in pandas 2.1.
    binary = np.full(len(emg_cleaned), np.nan)
    binary[changepoints[0::2]] = 0
    binary[changepoints[1::2]] = 1
    activity = pd.Series(binary).ffill().values

    # NOTE(review): the intent stated upstream is to label the higher-variance
    # segments as 1 (activations), but this flips the labels when the
    # 1-segments already have the larger SD — verify against the reference
    # implementation before changing.
    if emg_cleaned[activity == 1].std() > emg_cleaned[activity == 0].std():
        activity = np.abs(activity - 1)
    # Force the signal to start and end in the baseline state.
    activity[0] = 0
    activity[-1] = 0

    return activity
def _emg_activation_biosppy(emg_cleaned, sampling_rate=1000, size=0.05, threshold="default"):
    """Adapted from `find_onsets` in Biosppy.

    Full-wave rectifies the signal, smooths it with a "boxzen" convolution
    kernel, then binarizes it against a (possibly adaptive-default) threshold.
    """
    # check inputs
    if emg_cleaned is None:
        raise TypeError("Please specify an input signal.")

    # Full-wave rectification followed by convolution smoothing; the window
    # size is given in seconds and converted to samples here.
    rectified = np.abs(emg_cleaned)
    window = int(sampling_rate * size)
    smoothed = signal_smooth(rectified, method="convolution", kernel="boxzen", size=window)

    # Default threshold: 1.2 times the mean of the (absolute) smoothed signal.
    if threshold == "default":
        threshold = 1.2 * np.mean(np.abs(smoothed))

    return signal_binarize(smoothed, method="threshold", threshold=threshold)
def _emg_activation_silva(emg_cleaned, size=20, threshold_size=22, threshold="default"):
"""Follows the approach by Silva et al. 2012, adapted from `Biosppy`."""
if threshold_size <= size:
raise ValueError(
"NeuroKit error: emg_activation(): The window size for calculation of the "
"adaptive threshold must be bigger than the detection window size."
)
if threshold == "default":
threshold = 0.05
# subtract baseline offset
signal_zero_mean = emg_cleaned - np.mean(emg_cleaned)
# full-wave rectification
fwlo = np.abs(signal_zero_mean)
# moving average for calculating the test function
tf_mvgav = np.convolve(fwlo, np.ones((size,)) / size, mode="valid")
# moving average for calculating the adaptive threshold
threshold_mvgav = np.convolve(fwlo, np.ones((threshold_size,)) / threshold_size, mode="valid")
onset_time_list = []
offset_time_list = []
onset = False
for k in range(0, len(threshold_mvgav)):
if onset is True:
# an onset was previously detected, look for offset time
if tf_mvgav[k] < threshold_mvgav[k] and tf_mvgav[k] < threshold:
offset_time_list.append(k)
onset = False
# the offset has been detected, and we can look for another activation
else:
# we only look for another onset if a previous offset was detected
if tf_mvgav[k] >= threshold_mvgav[k] and tf_mvgav[k] >= threshold:
onset_time_list.append(k)
onset = True
onsets = np.union1d(onset_time_list, offset_time_list)
# adjust indices because of moving average
onsets += int(size / 2)
binary = np.full(len(emg_cleaned), np.nan)
binary[onsets[0::2]] = 0
binary[onsets[1::2]] = 1
activity = pd.Series(binary).fillna(method="bfill").values
activity = pd.Series(activity).fillna(0)
return activity
# =============================================================================
# Internals
# =============================================================================
def _emg_activation_activations(activity, duration_min=0.05):
    """Derive onset/offset indices and active sample indices from a binary activity vector."""
    # Locate periods of activity and periods of rest.
    active = events_find(
        activity, threshold=0.5, threshold_keep="above", duration_min=duration_min
    )
    active["offset"] = active["onset"] + active["duration"]
    rest = events_find(
        activity == 0, threshold=0.5, threshold_keep="above", duration_min=duration_min
    )
    rest["offset"] = rest["onset"] + rest["duration"]
    # Keep only activations that begin exactly where a rest period ends.
    keep = np.isin(active["onset"], rest["offset"])
    onsets = active["onset"][keep]
    offsets = active["offset"][keep]
    # make sure offset indices are within length of signal
    offsets = offsets[offsets < len(activity)]
    # Collect every sample index covered by the retained activations.
    spans = [np.arange(start, stop) for start, stop in zip(onsets, offsets)]
    new_activity = np.concatenate([np.array([])] + spans)
    # Prepare output.
    return {"EMG_Onsets": onsets, "EMG_Offsets": offsets, "EMG_Activity": new_activity}
| 15,435 | 34.731481 | 118 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_simulate.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import check_random_state
from ..signal import signal_resample
def emg_simulate(
    duration=10,
    length=None,
    sampling_rate=1000,
    noise=0.01,
    burst_number=1,
    burst_duration=1.0,
    random_state=None,
):
    """**Simulate an EMG signal**

    Generate an artificial (synthetic) EMG signal of a given duration and sampling rate:
    quiet (baseline) periods interleaved with ``burst_number`` bursts of muscle activity.

    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    length : int
        The desired length of the signal (in samples). Computed from ``duration`` and
        ``sampling_rate`` when unspecified.
    noise : float
        Noise level (standard deviation of the added gaussian noise).
    burst_number : int
        Desired number of bursts of activity (active muscle periods).
    burst_duration : float or list
        Duration of the bursts, in seconds. Can be a float (each burst will have the
        same duration) or a list with exactly one duration per burst.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for
        further information.

    Returns
    ----------
    array
        Vector containing the EMG signal.

    Raises
    ------
    ValueError
        If the number of burst durations does not match ``burst_number``, or if the
        total duration of the bursts exceeds ``duration``.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import pandas as pd

      emg = nk.emg_simulate(duration=10, burst_number=3)
      @savefig p_emg_simulate1.png scale=100%
      fig = nk.signal_plot(emg)
      @suppress
      plt.close()

    See Also
    --------
    ecg_simulate, rsp_simulate, eda_simulate, ppg_simulate

    References
    -----------
    This function is based on `this script
    <https://scientificallysound.org/2016/08/11/python-analysing-emg-signals-part-1/>`_.

    """
    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)

    # Generate number of samples automatically if length is unspecified
    if length is None:
        length = duration * sampling_rate

    # Sanity checks
    if isinstance(burst_duration, (int, float)):
        burst_duration = np.repeat(burst_duration, burst_number)

    # Fix: the original only rejected lists that were too long; a too-short list
    # would crash later with an IndexError, so validate the exact length here.
    if len(burst_duration) != burst_number:
        raise ValueError(
            "NeuroKit error: emg_simulate(): 'burst_duration' must contain exactly "
            "'burst_number' durations"
        )

    total_duration_bursts = np.sum(burst_duration)
    if total_duration_bursts > duration:
        raise ValueError(
            "NeuroKit error: emg_simulate(): The total duration of bursts cannot exceed the total duration"
        )

    # Generate bursts (synthesized at 1000 Hz, resampled to `sampling_rate` at the end)
    bursts = []
    for burst in range(burst_number):
        bursts += [list(rng.uniform(-1, 1, size=int(1000 * burst_duration[burst])) + 0.08)]

    # Generate quiet
    n_quiet = burst_number + 1  # number of quiet periods (in between bursts)
    duration_quiet = (duration - total_duration_bursts) / n_quiet  # duration of each quiet period
    quiets = []
    for quiet in range(n_quiet):  # pylint: disable=W0612
        quiets += [list(rng.uniform(-0.05, 0.05, size=int(1000 * duration_quiet)) + 0.08)]

    # Merge the two, always starting and ending with a quiet period
    emg = []
    for i in range(len(quiets)):  # pylint: disable=C0200
        emg += quiets[i]
        if i < len(bursts):
            emg += bursts[i]
    emg = np.array(emg)

    # Add random (gaussian distributed) noise
    emg += rng.normal(0, noise, len(emg))

    # Resample
    emg = signal_resample(
        emg, sampling_rate=1000, desired_length=length, desired_sampling_rate=sampling_rate
    )
    return emg
| 3,570 | 29.008403 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..misc.report import create_report
from ..signal import signal_sanitize
from .emg_activation import emg_activation
from .emg_amplitude import emg_amplitude
from .emg_clean import emg_clean
from .emg_methods import emg_methods
from .emg_plot import emg_plot
def emg_process(emg_signal, sampling_rate=1000, report=None, **kwargs):
    """**Process a electromyography (EMG) signal**

    Convenience function that cleans an EMG signal, extracts its amplitude and
    detects periods of muscular activity, optionally generating a report.

    Parameters
    ----------
    emg_signal : Union[list, np.array, pd.Series]
        The raw electromyography channel.
    sampling_rate : int
        The sampling frequency of ``emg_signal`` (in Hz, i.e., samples/second).
    report : str
        The filename of a report containing description and figures of processing
        (e.g. ``"myreport.html"``). Can also be ``"text"`` to just print the text in
        the console without saving anything. Defaults to ``None`` (no report).
    **kwargs
        Other arguments forwarded to specific methods. For more information,
        see :func:`.emg_methods`.

    Returns
    -------
    signals : DataFrame
        A DataFrame of same length as ``emg_signal`` containing the columns
        ``"EMG_Raw"``, ``"EMG_Clean"``, ``"EMG_Amplitude"``, ``"EMG_Activity"``,
        ``"EMG_Onsets"`` and ``"EMG_Offsets"``.
    info : dict
        A dictionary containing the information of each amplitude onset, offset, and
        peak activity (see :func:`emg_activation`), as well as the signals' sampling
        rate.

    See Also
    --------
    emg_clean, emg_amplitude, emg_plot
    """
    # Sanitize input and resolve per-step method parameters.
    emg_signal = signal_sanitize(emg_signal)
    methods = emg_methods(sampling_rate=sampling_rate, **kwargs)

    # Preprocessing: cleaning followed by amplitude extraction.
    cleaned = emg_clean(
        emg_signal, sampling_rate=sampling_rate, method=methods["method_cleaning"]
    )
    amplitude = emg_amplitude(cleaned)

    # Detect onsets, offsets, and periods of activity.
    activity_signal, info = emg_activation(
        emg_amplitude=amplitude,
        emg_cleaned=cleaned,
        sampling_rate=sampling_rate,
        method=methods["method_activation"],
        **methods["kwargs_activation"]
    )
    info["sampling_rate"] = sampling_rate  # Add sampling rate in dict info

    # Assemble the output DataFrame.
    signals = pd.concat(
        [
            pd.DataFrame(
                {"EMG_Raw": emg_signal, "EMG_Clean": cleaned, "EMG_Amplitude": amplitude}
            ),
            activity_signal,
        ],
        axis=1,
    )

    if report is not None:
        # Generate report containing description and figures of processing
        fig = None
        if ".html" in str(report):
            fig = emg_plot(signals, sampling_rate=sampling_rate, static=False)
        create_report(file=report, signals=signals, info=methods, fig=fig)

    return signals, info
| 3,693 | 33.523364 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/__init__.py | """Submodule for NeuroKit."""
from .emg_activation import emg_activation
from .emg_amplitude import emg_amplitude
from .emg_analyze import emg_analyze
from .emg_clean import emg_clean
from .emg_eventrelated import emg_eventrelated
from .emg_intervalrelated import emg_intervalrelated
from .emg_plot import emg_plot
from .emg_process import emg_process
from .emg_simulate import emg_simulate
__all__ = [
"emg_simulate",
"emg_clean",
"emg_amplitude",
"emg_process",
"emg_plot",
"emg_activation",
"emg_eventrelated",
"emg_intervalrelated",
"emg_analyze",
]
| 593 | 22.76 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_intervalrelated.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def emg_intervalrelated(data):
    """**EMG Analysis for Interval-related Data**

    Performs EMG analysis on longer periods of data (typically > 10 seconds), such as
    resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A DataFrame containing the different processed signal(s) as different columns,
        typically generated by ``emg_process()`` or ``bio_process()``. Can also take a
        dict containing sets of separately processed DataFrames.

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EMG features:

        * ``"EMG_Activation_N"``: the number of bursts of muscular activity.
        * ``"EMG_Amplitude_Mean"``: the mean amplitude of the muscular activity.

    Raises
    ------
    ValueError
        If the required ``EMG_Onsets``, ``EMG_Amplitude`` or ``EMG_Activity`` columns
        cannot be found in ``data``.

    See Also
    --------
    .bio_process, emg_eventrelated
    """
    intervals = {}

    # Format input
    if isinstance(data, pd.DataFrame):
        activity_cols = [col for col in data.columns if "EMG_Onsets" in col]
        if len(activity_cols) == 1:
            intervals["Activation_N"] = data[activity_cols[0]].values.sum()
        else:
            raise ValueError(
                "NeuroKit error: emg_intervalrelated(): Wrong"
                "input, we couldn't extract activity bursts."
                "Please make sure your DataFrame"
                "contains an `EMG_Onsets` column."
            )
        amplitude_cols = ["EMG_Amplitude", "EMG_Activity"]
        # Fix: the original computed len() of a list of booleans, which is always 2,
        # so the column check could never fail; actually verify both columns exist.
        if all(col in data.columns for col in amplitude_cols):
            data_bursts = data.loc[data["EMG_Activity"] == 1]
            intervals["Amplitude_Mean"] = data_bursts["EMG_Amplitude"].values.mean()
        else:
            raise ValueError(
                "NeuroKit error: emg_intervalrelated(): Wrong"
                "input, we couldn't extract EMG amplitudes."
                "Please make sure your DataFrame contains both"
                "`EMG_Amplitude` and `EMG_Activity` columns."
            )
        emg_intervals = pd.DataFrame.from_dict(intervals, orient="index").T.add_prefix("EMG_")

    elif isinstance(data, dict):
        for index in data:
            intervals[index] = {}  # Initialize empty container
            # Add label info
            intervals[index]["Label"] = data[index]["Label"].iloc[0]
            intervals[index] = _emg_intervalrelated_formatinput(data[index], intervals[index])
        emg_intervals = pd.DataFrame.from_dict(intervals, orient="index")

    return emg_intervals
# =============================================================================
# Internals
# =============================================================================
def _emg_intervalrelated_formatinput(interval, output={}):
"""Format input for dictionary."""
# Sanitize input
colnames = interval.columns.values
if len([i for i in colnames if "EMG_Onsets" in i]) == 0:
raise ValueError(
"NeuroKit error: emg_intervalrelated(): Wrong"
"input, we couldn't extract activity bursts."
"Please make sure your DataFrame"
"contains an `EMG_Onsets` column."
)
activity_cols = ["EMG_Amplitude", "EMG_Activity"]
if len([i in colnames for i in activity_cols]) != 2:
raise ValueError(
"NeuroKit error: emg_intervalrelated(): Wrong"
"input, we couldn't extract EMG amplitudes."
"Please make sure your DataFrame contains both"
"`EMG_Amplitude` and `EMG_Activity` columns."
)
bursts = interval["EMG_Onsets"].values
data_bursts = interval.loc[interval["EMG_Activity"] == 1]
output["EMG_Activation_N"] = np.sum(bursts)
output["EMG_Amplitude_Mean"] = data_bursts["EMG_Amplitude"].values.mean()
return output
| 4,366 | 35.090909 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/emg/emg_clean.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning, as_vector
from ..signal import signal_detrend
def emg_clean(emg_signal, sampling_rate=1000, method="biosppy"):
    """**Preprocess an electromyography (emg) signal**

    Clean an EMG signal using a set of parameters. Only one method is available at
    the moment:

    * **BioSPPy**: fourth order 100 Hz highpass Butterworth filter followed by a
      constant detrending.

    Parameters
    ----------
    emg_signal : Union[list, np.array, pd.Series]
        The raw EMG channel.
    sampling_rate : int
        The sampling frequency of ``emg_signal`` (in Hz, i.e., samples/second).
        Defaults to 1000.
    method : str
        The processing pipeline to apply. Can be one of ``"biosppy"`` or ``"none"``.
        Defaults to ``"biosppy"``. If ``"none"`` is passed, the raw signal will be
        returned without any cleaning.

    Returns
    -------
    array
        Vector containing the cleaned EMG signal.

    See Also
    --------
    emg_amplitude, emg_process, emg_plot
    """
    emg_signal = as_vector(emg_signal)

    # Forward-fill any missing samples before filtering.
    n_missing = np.sum(np.isnan(emg_signal))
    if n_missing > 0:
        warn(
            "There are " + str(n_missing) + " missing data points in your signal."
            " Filling missing values by using the forward filling method.",
            category=NeuroKitWarning,
        )
        emg_signal = _emg_clean_missing(emg_signal)

    # Dispatch on the (case-insensitive) cleaning method.
    method = str(method).lower()
    if method == "none":
        return emg_signal
    if method == "biosppy":
        return _emg_clean_biosppy(emg_signal, sampling_rate=sampling_rate)
    raise ValueError(
        "NeuroKit error: emg_clean(): 'method' should be one of 'biosppy' or 'none'."
    )
# =============================================================================
# Handle missing data
# =============================================================================
def _emg_clean_missing(emg_signal):
emg_signal = pd.DataFrame.pad(pd.Series(emg_signal))
return emg_signal
# =============================================================================
# BioSPPy
# =============================================================================
def _emg_clean_biosppy(emg_signal, sampling_rate=1000):
    """Fourth-order 100 Hz highpass Butterworth filter followed by constant detrending."""
    # Normalize the 100 Hz cutoff by the Nyquist frequency (sampling_rate / 2).
    normalized_cutoff = 2 * np.array(100) / sampling_rate

    # Zero-phase highpass filtering.
    b, a = scipy.signal.butter(N=4, Wn=normalized_cutoff, btype="highpass", analog=False)
    filtered = scipy.signal.filtfilt(b, a, emg_signal)

    # Remove the constant baseline (order-0 detrend).
    return signal_detrend(filtered, order=0)
| 3,220 | 28.550459 | 99 | py |
ULR | ULR-main/dual-encoder/L2/utils.py | import os
from transformers.data.processors.utils import DataProcessor, InputExample
from transformers import PreTrainedTokenizer
from tqdm import tqdm
import random
import code
from typing import List, Optional, Union
import json
from dataclasses import dataclass
import logging
logger = logging.getLogger(__name__)
@dataclass
class InputFeatures:
    """
    A single set of features of data for a (text_a, text_b) pair.

    Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids_a / input_ids_b: Indices of input sequence tokens in the vocabulary
            for each side of the pair.
        attention_mask_a / attention_mask_b: Mask to avoid performing attention on
            padding token indices. Mask values selected in ``[0, 1]``:
            usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded)
            tokens.
        token_type_ids_a / token_type_ids_b: (Optional) Segment token indices to
            indicate first and second portions of the inputs. Only some models use
            them.
        label: (Optional) Label corresponding to the input. Int for classification
            problems, float for regression problems.
    """

    input_ids_a: List[int]
    input_ids_b: List[int]
    attention_mask_a: Optional[List[int]] = None
    token_type_ids_a: Optional[List[int]] = None
    attention_mask_b: Optional[List[int]] = None
    token_type_ids_b: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string (newline-terminated)."""
        # Fix: the module only does `from dataclasses import dataclass`, so the
        # bare name `dataclasses` raised a NameError here at runtime.
        import dataclasses
        return json.dumps(dataclasses.asdict(self)) + "\n"
def dual_encoder_convert_examples_to_features(
    examples: List[InputExample],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """Tokenize example pairs and pack them into ``InputFeatures`` for the dual encoder."""
    if max_length is None:
        max_length = tokenizer.max_len

    label_map = {label: i for i, label in enumerate(label_list)}

    def _encode_label(example: InputExample) -> Union[int, float, None]:
        # Convert the raw label according to the task's output mode.
        if example.label is None:
            return None
        if output_mode == "classification":
            return label_map[example.label]
        if output_mode == "regression":
            return float(example.label)
        raise KeyError(output_mode)

    labels = [_encode_label(example) for example in examples]

    # Encode both sides of each pair separately.
    encoded_a = tokenizer.batch_encode_plus(
        [(example.text_a) for example in examples], max_length=max_length, pad_to_max_length=True,
    )
    encoded_b = tokenizer.batch_encode_plus(
        [(example.text_b) for example in examples], max_length=max_length, pad_to_max_length=True,
    )

    features = []
    for idx in range(len(examples)):
        # Suffix each tokenizer output key with the side it came from.
        kwargs = {}
        for key in encoded_a:
            kwargs[key + "_a"] = encoded_a[key][idx]
            kwargs[key + "_b"] = encoded_b[key][idx]
        features.append(InputFeatures(**kwargs, label=labels[idx]))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("features: %s" % features[i])

    return features
class NatcatProcessor(DataProcessor):
    """Processor for the Natcat data set."""

    def __init__(self):
        super(NatcatProcessor, self).__init__()

    def get_examples(self, filepath):
        """Build positive/negative (category, article) pairs from a TSV file.

        Each line holds: the positive category, one or more negative
        categories, and the article text, separated by tabs. Every negative
        category yields one positive pair followed by one negative pair.
        """
        examples = []
        guid = 0
        with open(filepath) as fin:
            lines = fin.read().strip().split("\n")
        for line in tqdm(lines):
            fields = line.strip().split("\t")
            pos_cat = fields[0]
            neg_cats = fields[1:-1]
            article = fields[-1]
            for neg_cat in neg_cats:
                examples.append(InputExample(guid=guid, text_a=pos_cat, text_b=article, label='1'))
                guid += 1
                examples.append(InputExample(guid=guid, text_a=neg_cat, text_b=article, label='0'))
                guid += 1
        return examples

    def get_labels(self):
        """See base class."""
        return ["0", "1"]
class EvalProcessor:
    """Processor pairing every evaluation document with every candidate category."""

    def __init__(self, cat_file_path):
        super(EvalProcessor, self).__init__()
        # Candidate category names, one per line.
        self.cats = []
        with open(cat_file_path) as fin:
            for line in fin:
                self.cats.append(line.strip())

    def get_examples(self, filepath):
        """Create one (category, text) pair per category for each CSV line.

        Each line is ``label,text``; the label is 1-based and may be quoted.
        """
        examples = []
        guid = 0
        with open(filepath) as fin:
            lines = fin.read().strip().split("\n")
        for line in tqdm(lines):
            label_part, text_part = line.strip().split(",", 1)
            if label_part.startswith("'") or label_part.startswith('"'):
                label_part = label_part[1:-1]
            label = int(label_part) - 1
            # Drop surrounding quotes and keep at most the first 128 tokens.
            text = " ".join(text_part[1:-1].split()[:128])
            if text.strip() == "":
                text = "N/A"
            for cat in self.cats:
                guid += 1
                if label >= len(self.cats):
                    # Out-of-range labels mark every pair as positive.
                    examples.append(InputExample(guid=guid, text_a=cat, text_b=text, label=1))
                else:
                    pair_label = 1 if cat == self.cats[label] else 0
                    examples.append(InputExample(guid=guid, text_a=cat, text_b=text, label=pair_label))
        return examples

    def get_labels(self):
        """Binary relevance labels."""
        return [0, 1]
# Registry mapping task names to their data processor classes.
processors = {
    "natcat": NatcatProcessor,
    "eval": EvalProcessor,
}
# Output mode ("classification" vs. "regression") used when encoding labels for each task.
output_modes = {
    "natcat": "classification",
    "eval": "classification",
}
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels (numpy arrays)."""
    matches = preds == labels
    return matches.mean()
def compute_metrics(task_name, preds, labels):
    """Return the metric dict for a known task (accuracy); falls through otherwise."""
    if task_name in ("natcat", "eval"):
        return {"acc": simple_accuracy(preds, labels)}
class DataFiles:
    """Track a directory's ``.data`` files and the subset still to be processed."""

    def __init__(self, directory):
        # All ".data" files directly inside `directory` (non-recursive).
        self.all_files = [
            os.path.join(directory, f)
            for f in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, f)) and f.endswith(".data")
        ]
        # Fix: work on a copy — the original aliased all_files, so popping a
        # pending file via next() silently shrank all_files as well.
        self.todo_files = list(self.all_files)

    def next(self):
        """Pop and return the next pending file path, or None when done."""
        if len(self.todo_files) == 0:
            return None
        return self.todo_files.pop()

    def save(self, file_path):
        """Write the pending file list to ``file_path``, one path per line."""
        with open(file_path, "w") as fout:
            for f in self.todo_files:
                fout.write(f + "\n")

    def load(self, file_path):
        """Replace the pending list with paths read from ``file_path``."""
        self.todo_files = []
        with open(file_path) as fin:
            for f in fin:
                self.todo_files.append(f.strip())
| 6,812 | 31.913043 | 154 | py |
ULR | ULR-main/dual-encoder/L2/compute_acc_kmeans.py | import numpy as np
import sys
from sklearn.metrics import f1_score
from sklearn.kernel_approximation import RBFSampler
import code
# Command-line interface: expects exactly three file arguments.
if len(sys.argv) not in [4]:
    print("Usage: python compute_acc.py test_file text_embeddings category_embeddings ")
    exit(-1)
test_file = sys.argv[1]
text_file = sys.argv[2]
cat_file = sys.argv[3]
# Gold labels: first CSV field of each line, possibly quoted, converted to 0-based.
with open(test_file) as fin:
    labels = []
    for line in fin:
        label, _ = line.split(",", 1)
        if label.startswith("'") or label.startswith('"'):
            label = label[1:-1]
        labels.append(int(label) - 1)
labels = np.array(labels)
# Whitespace-separated embedding matrices: one row per text / per category.
text_embeddings = np.loadtxt(text_file)
cat_embeddings = np.loadtxt(cat_file)
num_cats = cat_embeddings.shape[0]
def euclidean(a, b):
    """Squared Euclidean distance along the last axis (broadcasts like ``a - b``)."""
    diff = a - b
    return np.sum(diff * diff, -1)
# Lloyd-style k-means refinement, initialized at the category embeddings.
centroids = cat_embeddings
all_accs = []
all_scores = []
for k in range(100):
    # Pairwise squared Euclidean distances between every text and every centroid.
    centroids = np.expand_dims(centroids, 0)
    text_embeddings_unsqueezed = np.expand_dims(text_embeddings, 1)
    scores = euclidean(text_embeddings_unsqueezed, centroids)
    # Mean distance of each text to its closest centroid (clustering objective).
    avg_score = np.mean(scores.min(1))
    all_scores.append(avg_score)
    new_preds = scores.argmin(1)
    if k > 0:
        change_of_preds = np.sum(preds != new_preds)
        print("change of preds: ", change_of_preds)
    print("average score: ", avg_score)
    # code.interact(local=locals())
    preds = new_preds
    acc = np.sum(preds == labels) / len(labels)
    all_accs.append(acc)
    print("after {} iterations of k means accuracy: {}".format(k, acc))
    # Recompute centroids: mean of the texts assigned to each category,
    # falling back to the global mean for empty clusters.
    centroids = []
    for i in range(num_cats):
        centroid = text_embeddings[preds == i]
        if centroid.shape[0] > 0:
            centroids.append(centroid.mean(0, keepdims=True))
        else:
            centroids.append(text_embeddings.mean(0, keepdims=True))
    centroids = np.concatenate(centroids, 0)
    # Anchor the updated centroids halfway back towards the category embeddings.
    centroids = (centroids + cat_embeddings) / 2
    # Converged once assignments stop changing.
    if k > 0 and change_of_preds == 0:
        break
# Report the accuracy at the iteration with the best (lowest) objective.
min_index = np.argmin(all_scores)
print("min avg score: ", all_scores[min_index], ", the corresponding accuracy: ", all_accs[min_index], ", max accuracy: ", max(all_accs))
ULR | ULR-main/dual-encoder/L2/eval_downstream_task.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# All pretrained model shortcut names supported, gathered from every config class.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# Map from the --model_type flag to (config class, model class, tokenizer class).
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and CUDA, if GPUs are used)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most ``args.save_total_limit`` remain."""
    limit = args.save_total_limit
    # No limit configured (None/0) or a non-positive limit: keep everything.
    if not limit or limit <= 0:
        return

    # Check if we should delete older checkpoint(s)
    sorted_ckpts = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    if len(sorted_ckpts) <= limit:
        return

    n_delete = max(0, len(sorted_ckpts) - limit)
    for checkpoint in sorted_ckpts[:n_delete]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
        shutil.rmtree(checkpoint)
def evaluate(args, model, tokenizer, file_path, cat_file_path, prefix=""):
    """Score every (category, text) pair with the dual encoder and dump embeddings.

    For each document in ``file_path``, the model embeds the text and every
    category from ``cat_file_path``; the squared L2 distance between the two
    embeddings is recorded as the prediction score. Text embeddings, category
    embeddings, and the per-document score matrix are written as text files
    into ``args.output_dir``.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)

    # One category name per line of the category file.
    cats = []
    with open(cat_file_path) as fin:
        for line in fin:
            cats.append(line.strip())

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, file_path, cat_file_path)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        # NOTE(review): eval_loss is never updated and `labels` below is unused.
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = []
        text_embeddings = []
        category_embeddings = []
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # Side a = category, side b = text (see load_and_cache_examples ordering).
                inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
                inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
                labels = batch[6]
                if args.model_type != "distilbert":
                    inputs_a["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                    inputs_b["token_type_ids"] = (
                        batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs_a = model(**inputs_a)
                outputs_b = model(**inputs_b)
                # code.interact(local=locals())
                # Either the pooled [CLS]-style output or a mask-weighted average
                # of the token embeddings, depending on --bert_representation.
                if args.bert_representation == "pool":
                    rep1 = outputs_a[1]
                    rep2 = outputs_b[1]
                elif args.bert_representation == "avg":
                    rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
                    rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
                # Squared L2 distance between the pair's embeddings.
                score = torch.sum((rep1 - rep2) * (rep1 - rep2), -1)
                category_embeddings.append(rep1.data.cpu().numpy())
                text_embeddings.append(rep2.data.cpu().numpy())
            nb_eval_steps += 1
            # NOTE(review): `preds` is initialized to [] above, so this branch is
            # dead code — both arms perform the same append.
            if preds is None:
                preds.append(score.detach().cpu().numpy().reshape((-1)))
            else:
                preds.append(score.detach().cpu().numpy().reshape((-1)))
        # Reshape flat scores into one row per document, one column per category.
        preds = np.concatenate(preds, 0)
        preds = preds.reshape((-1, len(cats)))
        logger.info("save prediction file to eval_output_dir")
        out_file_name = ".".join(file_path.split("/")[-2:])
        text_embeddings = np.concatenate(text_embeddings, 0)
        category_embeddings = np.concatenate(category_embeddings, 0)
        # Every document appears once per category, so stride by len(cats) to get
        # one text embedding per document; the first len(cats) rows cover all categories.
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".text.txt"), text_embeddings[::len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".category.txt"), category_embeddings[:len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".preds.txt"), preds)
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, cat_file_path):
    """Load a dataset file, convert its examples to features, and wrap them in a TensorDataset."""
    # NOTE(review): `not evaluate` references the module-level `evaluate` function
    # object (always truthy), so this condition is always False and the barrier
    # never runs — presumably a leftover from a boolean flag; verify intent.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = processors[task](cat_file_path=cat_file_path)
    output_mode = output_modes[task]
    # cached_features_file = os.path.join(
    #     args.data_dir,
    #     "cached_{}_{}_{}".format(
    #         file_path,
    #         str(args.max_seq_length),
    #         str(task),
    #     ),
    # )
    # Load data features from cache or dataset file
    # if os.path.exists(cached_features_file) and not args.overwrite_cache:
    #     logger.info("Loading features from cached file %s", cached_features_file)
    #     features = torch.load(cached_features_file)
    # else:
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    # Multi-class tasks (e.g. nyt) need an extra label file for their processor.
    # if args.task_name == "nyt":
    if args.multi_class:
        examples = (processor.get_examples(file_path, args.label_filepath))
    else:
        examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    # if args.local_rank in [-1, 0]:
    #     logger.info("Saving features into cached file %s", cached_features_file)
    #     torch.save(features, cached_features_file)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )

    # Convert to Tensors and build dataset
    # Missing token_type_ids (e.g. RoBERTa) are padded with zeros of matching length.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor([f.token_type_ids_a if f.token_type_ids_a is not None else [0]*len(f.attention_mask_a) for f in features], dtype=torch.long)
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor([f.token_type_ids_b if f.token_type_ids_b is not None else [0]*len(f.attention_mask_b) for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, all_input_ids_b, all_attention_mask_b, all_token_type_ids_b, all_labels)
    return dataset
def main():
    """Entry point for checkpoint evaluation.

    Parses command-line arguments, sets up the device (CPU/GPU/distributed),
    seeds the RNGs, loads a pretrained dual-encoder model plus tokenizer, and
    runs ``evaluate`` over one or all checkpoints in ``--output_dir``.

    Returns:
        dict mapping ``"<metric>_<global_step>"`` to the metric value for each
        evaluated checkpoint.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--eval_data_dir",
        default=None,
        type=str,
        required=True,
        help="The directory containing the evaluation dataset",
    )
    # NOTE(review): help text below appears copy-pasted from --eval_data_dir;
    # this flag is passed to evaluate() as the data *file* — verify intent.
    parser.add_argument(
        "--eval_data_file",
        default=None,
        type=str,
        required=True,
        help="The directory containing the evaluation dataset",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--label_filepath",
        default=None,
        type=str,
        help="Path to the label file for the nyt dataset",
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--multi_class", action="store_true", help="Whether it is a multi class classfication task.")
    parser.add_argument(
        "--all_cats_file", default=None, type=str, help="The file containing all category names",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    # Only the main process (rank -1 or 0) logs at INFO level.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    # If the output dir is non-empty and we are training without --overwrite_output_dir,
    # resume from the most recent checkpoint subdirectory inside it.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        # NOTE(review): the sort key mixes int -1 (dirs without "-") with the *string*
        # step suffix; in Python 3 comparing int to str raises TypeError, and string
        # comparison would order "9" after "10" — verify checkpoint naming makes this safe.
        all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    args.output_mode = output_modes[args.task_name]
    # Binary relevance labels; hard-coded rather than derived from the processor.
    num_labels = 2
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        # Reload the tokenizer from the trained output dir (not model_name_or_path).
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        # NOTE(review): file_path is computed here but evaluate() below is called
        # with args.eval_data_file instead — confirm which one is intended.
        file_path = os.path.join(args.eval_data_dir, "test.csv")
        # if args.task_name == "nyt":
        if args.multi_class:
            file_path = os.path.join(args.eval_data_dir, "test.doc.txt")
            if args.all_cats_file is not None:
                all_cats_file = args.all_cats_file
            else:
                all_cats_file = os.path.join(args.eval_data_dir, "classes.txt.acl")
        # NOTE(review): all_cats_file is only bound inside the multi_class branch
        # but used unconditionally below — NameError when --multi_class is not set.
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            # Discover every checkpoint dir that contains a saved weights file.
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, args.eval_data_file, all_cats_file, prefix=prefix)
            # Suffix each metric with the checkpoint's global step so results don't collide.
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Script entry point: run the evaluation pipeline defined in main().
if __name__ == "__main__":
    main()
| 23,469 | 39.25729 | 164 | py |
ULR | ULR-main/dual-encoder/L2/train_natcat.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import torch.nn.functional as F
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger; configured by logging.basicConfig in main().
logger = logging.getLogger(__name__)
# Flat tuple of every pretrained shortcut name across the supported config
# classes; used only to build the --model_name_or_path help string.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# Maps --model_type to its (config, model, tokenizer) classes.  Note that
# "bert" and "roberta" map to the bare encoder (BertModel/RobertaModel) for
# dual-encoder training, while the others map to classification heads.
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed every RNG (python `random`, numpy, torch CPU and, when GPUs are
    available, torch CUDA) from ``args.seed`` for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
if len(checkpoints_sorted) <= args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=0):
    """Train the dual encoder for one pass over *train_dataloader*.

    Each batch is expected to contain interleaved (positive, negative) text
    pairs: (ids_a, mask_a, seg_a, ids_b, mask_b, seg_b, label).  Both sides are
    encoded with the same *model*, the squared L2 distance between the two
    representations is computed, and a margin (hinge) ranking loss pushes the
    positive pair's distance below the negative pair's by ``args.margin``.

    Returns (global_step, average loss, optimizer, scheduler) so the caller
    can continue training across data shards.
    """
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args)  # Added here for reproductibility
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    for step, batch in enumerate(epoch_iterator):
        # Skip past any already trained steps if resuming training
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
        inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
        # NOTE(review): `labels` is read here but never used below — the hinge
        # loss relies purely on the (positive, negative) pair ordering; confirm.
        labels = batch[6]
        if args.model_type != "distilbert":
            inputs_a["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
            inputs_b["token_type_ids"] = (
                batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
        outputs_a = model(**inputs_a)
        outputs_b = model(**inputs_b)
        if args.bert_representation == "pool":
            # Pooled [CLS]-derived representation from the encoder.
            rep1 = outputs_a[1]
            rep2 = outputs_b[1]  # now the score will be between -1 and 1
        elif args.bert_representation == "avg":
            # Mean-pool the token embeddings, masking out padding; +1e-8 avoids
            # division by zero for an all-zero attention mask.
            rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
            rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
        # Squared L2 distance per pair; reshape to (n, 2) = (positive, negative).
        score = torch.sum((rep1 - rep2) * (rep1 - rep2), -1)
        score = score.view(-1, 2)
        # code.interact(local=locals())
        # Hinge loss: max(0, d(pos) + margin - d(neg)).
        score = score[:, 0] + args.margin - score[:, 1]
        score[score <= 0] = 0
        loss = score.mean()
        #code.interact(local=locals())
        # if label is 1, we want score to be high, so we negate it
        # if label is 0, we want the score to be low, so we keep the sign
        # loss = 2 - (labels - 0.5) * 2 * cosine_score
        # loss = (1 - 2 * labels) * cosine_score
        # This loss function does not give good model performances
        # loss = labels * 2 - (labels - 0.5) * 2 * (cosine_score + 1)
        if args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        # NOTE(review): loss is already a scalar (mean'd above); this extra
        # mean() is a no-op — confirm it can be dropped.
        loss = loss.mean()
        if args.fp16:
            # NOTE(review): `amp` is imported only inside main()'s fp16 branch,
            # which binds it as a *local* there — this reference would raise
            # NameError unless apex is imported at module level; verify.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        tr_loss += loss.item()
        if (step + 1) % args.gradient_accumulation_steps == 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            global_step += 1
            if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {}
                if (
                    args.local_rank == -1 and args.evaluate_during_training
                ):  # Only evaluate when single GPU otherwise metrics may not average well
                    results = evaluate(args, model, tokenizer)
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss
                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))
        if args.max_steps > 0 and global_step > args.max_steps:
            epoch_iterator.close()
            break
    # NOTE(review): tr_loss / global_step raises ZeroDivisionError if no
    # optimizer step happened in this call — confirm shards are never empty.
    return global_step, tr_loss / global_step, optimizer, scheduler
def evaluate(args, model, tokenizer, prefix=""):
    """Run evaluation for ``args.task_name`` and return a metrics dict.

    Results are also appended to ``eval_results.txt`` under the output dir.
    NOTE(review): this looks like boilerplate carried over from a GLUE script —
    the load_and_cache_examples() defined in this file takes a ``file_path``
    argument and no ``evaluate`` keyword, so the call below would not match its
    signature; confirm whether this function is actually exercised here.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                # Expects a model with a classification head returning (loss, logits).
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate predictions and gold labels across batches.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, evaluate=False):
    """Load examples for *task* from *file_path*, encode them, and build a dataset.

    Args:
        args: parsed command-line namespace; uses ``local_rank`` and
            ``max_seq_length``.
        task: key into the ``processors``/``output_modes`` registries.
        tokenizer: pretrained tokenizer used to encode each text pair.
        file_path: path of the data shard to read.
        evaluate: when True, skip the distributed-training barrier (evaluation
            runs on the main process only).  Defaults to False, so existing
            positional callers are unaffected.

    Returns:
        torch.utils.data.TensorDataset of (input_ids_a, attention_mask_a,
        token_type_ids_a, input_ids_b, attention_mask_b, token_type_ids_b,
        label) tensors, one row per example.
    """
    # Bug fix: the original condition read ``not evaluate`` where ``evaluate``
    # was the module-level *function* (always truthy), so this barrier was dead
    # code.  With the new boolean parameter, non-main ranks now wait here while
    # the first process encodes the dataset during training.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    examples = processor.get_examples(file_path)
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )

    def _segment_ids(token_type_ids, attention_mask):
        # Models without segment embeddings (e.g. RoBERTa) yield None; pad with
        # zeros so every column of the dataset has a uniform shape.
        return token_type_ids if token_type_ids is not None else [0] * len(attention_mask)

    # Convert to tensors and build the dataset.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor(
        [_segment_ids(f.token_type_ids_a, f.attention_mask_a) for f in features], dtype=torch.long
    )
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor(
        [_segment_ids(f.token_type_ids_b, f.attention_mask_b) for f in features], dtype=torch.long
    )
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    return TensorDataset(
        all_input_ids_a,
        all_attention_mask_a,
        all_token_type_ids_a,
        all_input_ids_b,
        all_attention_mask_b,
        all_token_type_ids_b,
        all_labels,
    )
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--bert_representation",
default="pool",
choices=["avg", "pool"],
type=str,
help="The BERT representation type",
)
parser.add_argument(
"--margin",
default=0.5,
type=float,
help="The margin to train with hinge loss",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--save_total_limit",
type=int,
default=None,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
# set to load the latest checkpoint for training
args.model_name_or_path = args.output_dir
all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
all_model_checkpoints.sort(reverse=True)
args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
logger.info("setting to load the model from %s", args.model_name_or_path)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
num_labels = 2
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
datafiles = DataFiles(args.data_dir)
if os.path.isfile(os.path.join(args.model_name_or_path, "datafiles.txt")):
datafiles.load(os.path.join(args.model_name_or_path, "datafiles.txt"))
global_step = 0
shard_count = 0
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
while True:
todo_file = datafiles.next()
if not todo_file:
break
if args.local_rank == 0:
torch.distributed.barrier()
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, todo_file)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if shard_count == 0: # if this is the first shard, create the optimizer or load from the previous checkpoint
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs * len(datafiles.all_files) # 280 shards of data files in total
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
logger.info("loading optimizer and scheduler from %s", args.model_name_or_path)
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
if shard_count == 0:
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint %s", args.model_name_or_path)
logger.info(" Continuing training from global step %d", global_step)
global_step, tr_loss, optimizer, scheduler = train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=global_step)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
datafiles.save(os.path.join(output_dir, "datafiles.txt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
_rotate_checkpoints(args, "checkpoint")
shard_count += 1
if args.local_rank in [-1, 0]:
tb_writer.close()
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
# Script entry point.
if __name__ == "__main__":
    main()
| 33,817 | 41.753477 | 177 | py |
ULR | ULR-main/dual-encoder/cosine/utils.py | import os
from transformers.data.processors.utils import DataProcessor, InputExample
from transformers import PreTrainedTokenizer
from tqdm import tqdm
import random
import code
from typing import List, Optional, Union
import json
from dataclasses import dataclass
import logging
logger = logging.getLogger(__name__)
@dataclass
class InputFeatures:
    """
    A single set of features for one (text_a, text_b) pair fed to the dual encoder.

    Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids_a / input_ids_b: Indices of input sequence tokens in the vocabulary,
            one list per side of the pair.
        attention_mask_a / attention_mask_b: Mask to avoid performing attention on
            padding token indices. Mask values selected in ``[0, 1]``:
            usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids_a / token_type_ids_b: (Optional) Segment token indices to indicate
            first and second portions of the inputs. Only some models use them.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
    """

    input_ids_a: List[int]
    input_ids_b: List[int]
    attention_mask_a: Optional[List[int]] = None
    token_type_ids_a: Optional[List[int]] = None
    attention_mask_b: Optional[List[int]] = None
    token_type_ids_b: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None

    def to_json_string(self):
        """Serializes this instance to a newline-terminated JSON string."""
        # Local import: the module only does ``from dataclasses import dataclass``,
        # so the bare name ``dataclasses`` used here previously raised NameError
        # the first time this method was called.
        import dataclasses

        return json.dumps(dataclasses.asdict(self)) + "\n"
def dual_encoder_convert_examples_to_features(
    examples: List[InputExample],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """Encode (text_a, text_b) example pairs into ``InputFeatures``.

    Both sides of each pair are tokenized independently (padded/truncated to
    ``max_length``); the resulting encodings are merged with ``_a``/``_b``
    key suffixes.  Labels are mapped through ``label_list`` for classification
    or cast to float for regression.  The first five examples are logged.
    """
    if max_length is None:
        max_length = tokenizer.max_len

    label_map = {label: idx for idx, label in enumerate(label_list)}

    def label_from_example(example: InputExample) -> Union[int, float, None]:
        # None labels pass through untouched (unlabeled data).
        if example.label is None:
            return None
        if output_mode == "classification":
            return label_map[example.label]
        if output_mode == "regression":
            return float(example.label)
        raise KeyError(output_mode)

    labels = [label_from_example(example) for example in examples]

    encoded_a = tokenizer.batch_encode_plus(
        [(example.text_a) for example in examples], max_length=max_length, pad_to_max_length=True,
    )
    encoded_b = tokenizer.batch_encode_plus(
        [(example.text_b) for example in examples], max_length=max_length, pad_to_max_length=True,
    )

    features = []
    for idx in range(len(examples)):
        merged = {}
        for key in encoded_a:
            merged[key + "_a"] = encoded_a[key][idx]
            merged[key + "_b"] = encoded_b[key][idx]
        features.append(InputFeatures(**merged, label=labels[idx]))

    for idx, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("features: %s" % features[idx])

    return features
class NatcatProcessor(DataProcessor):
    """Processor for the Natcat data set.

    Input lines are tab-separated: one positive category, zero or more
    negative categories, then the article text as the last field.
    """

    def __init__(self):
        super(NatcatProcessor, self).__init__()

    def get_examples(self, filepath):
        """Build ``InputExample``s from the article-category pair file.

        Positive categories are labelled '1' and negative ones '0'; ``guid``
        is a running counter over the produced examples.
        """
        with open(filepath) as handle:
            raw_lines = handle.read().strip().split("\n")

        examples = []
        guid = 0
        for raw in tqdm(raw_lines):
            fields = raw.strip().split("\t")
            positives = fields[:1]
            negatives = fields[len(positives):-1]
            article = fields[-1]
            for category in positives:
                examples.append(InputExample(guid=guid, text_a=category, text_b=article, label='1'))
                guid += 1
            for category in negatives:
                examples.append(InputExample(guid=guid, text_a=category, text_b=article, label='0'))
                guid += 1
        return examples

    def get_labels(self):
        """See base class."""
        return ["0", "1"]
class EvalProcessor:
    """Processor for CSV-style evaluation sets (one ``label,text`` per line)."""

    def __init__(self, cat_file_path):
        super(EvalProcessor, self).__init__()
        # One category name per line of the category file.
        self.cats = []
        with open(cat_file_path) as handle:
            for raw in handle:
                self.cats.append(raw.strip())

    def get_examples(self, filepath):
        """Pair every document in ``filepath`` with every known category.

        The category matching the (1-based, possibly quoted) gold label gets
        label 1, all other categories 0.  When the gold index falls outside
        the category list, every pair is labelled 1.  Text is truncated to
        128 whitespace tokens; empty texts become "N/A".
        """
        with open(filepath) as handle:
            raw_lines = handle.read().strip().split("\n")

        examples = []
        guid = 0
        for raw in tqdm(raw_lines):
            parts = raw.strip().split(",", 1)
            if parts[0].startswith("'") or parts[0].startswith('"'):
                parts[0] = parts[0][1:-1]
            label = int(parts[0]) - 1
            text = " ".join(parts[1][1:-1].split()[:128])
            if text.strip() == "":
                text = "N/A"
            for cat in self.cats:
                guid += 1
                if label >= len(self.cats):
                    is_match = True
                else:
                    is_match = cat == self.cats[label]
                examples.append(
                    InputExample(guid=guid, text_a=cat, text_b=text, label=1 if is_match else 0)
                )
        return examples

    def get_labels(self):
        """Binary relevance labels."""
        return [0, 1]
# Registry mapping task names to their processor classes.
processors = {
    "natcat": NatcatProcessor,
    "eval": EvalProcessor,
}

# Both tasks are treated as classification problems.
output_modes = {
    "natcat": "classification",
    "eval": "classification",
}
def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels`` element-wise."""
    matches = preds == labels
    return matches.mean()
def compute_metrics(task_name, preds, labels):
    """Return ``{"acc": ...}`` for known tasks; ``None`` for any other name
    (matching the original implicit fall-through)."""
    if task_name in ("natcat", "eval"):
        return {"acc": simple_accuracy(preds, labels)}
    return None
class DataFiles:
    """Queue of ``*.data`` shard files with save/resume support.

    ``todo_files`` starts as an alias of ``all_files`` (intentionally not a
    copy, matching the original behavior), so popping from the queue also
    shrinks ``all_files``.
    """

    def __init__(self, directory):
        self.all_files = [
            os.path.join(directory, name)
            for name in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, name)) and name.endswith(".data")
        ]
        self.todo_files = self.all_files

    def next(self):
        """Pop and return the next pending file, or ``None`` when exhausted."""
        try:
            return self.todo_files.pop()
        except IndexError:
            return None

    def save(self, file_path):
        """Write the remaining files, one per line, to ``file_path``."""
        with open(file_path, "w") as handle:
            handle.writelines(name + "\n" for name in self.todo_files)

    def load(self, file_path):
        """Replace the pending queue with the lines of ``file_path``."""
        self.todo_files = []
        with open(file_path) as handle:
            for raw in handle:
                self.todo_files.append(raw.strip())
| 6,865 | 32.009615 | 154 | py |
ULR | ULR-main/dual-encoder/cosine/compute_acc_kmeans_cosine.py | import numpy as np
import sys
from sklearn.metrics import f1_score
import code
import random
random.seed(1)

# Expect exactly three positional arguments after the script name.
if len(sys.argv) not in [4]:
    print("Usage: python compute_acc.py test_file text_embeddings category_embeddings ")
    exit(-1)

test_file = sys.argv[1]
text_file = sys.argv[2]
cat_file = sys.argv[3]

# Parse gold labels from the first CSV column.  Labels in the file are
# 1-based and possibly quoted, so strip quotes and convert to 0-based ints.
with open(test_file) as fin:
    labels = []
    for line in fin:
        label, _ = line.split(",", 1)
        if label.startswith("'") or label.startswith('"'):
            label = label[1:-1]
        labels.append(int(label) - 1)
labels = np.array(labels)
def normalize(x):
    """Scale each row of ``x`` to unit L2 norm so dot products are cosines.

    The previous version divided by the *squared* norm (``sum(x*x)`` without
    a square root), which does not yield unit vectors — each row ended up
    with norm ``1/||x||`` — so the downstream dot products were not cosine
    similarities and per-row scaling distorted the rankings.
    """
    return x / np.sqrt(np.sum(x * x, -1, keepdims=True))
# Load the dumped embedding matrices (one row per document / per category)
# and row-normalize both so that dot products act as similarity scores.
text_embeddings = np.loadtxt(text_file)
cat_embeddings = np.loadtxt(cat_file)
num_cats = cat_embeddings.shape[0]
text_embeddings = normalize(text_embeddings)
cat_embeddings = normalize(cat_embeddings)
def dot_product(a, b):
    """Inner product along the last axis (supports broadcasting)."""
    return (a * b).sum(-1)
# Category-seeded k-means: start the centroids at the category embeddings,
# then alternate assignment / centroid-update steps, blending each new
# centroid with its original category embedding.
centroids = cat_embeddings
all_avg_scores = []
all_accs = []
for k in range(100):
    # Score every document against every centroid: (num_docs, num_cats).
    centroids = np.expand_dims(centroids, 0)
    text_embeddings_unsqueezed = np.expand_dims(text_embeddings, 1)
    scores = dot_product(text_embeddings_unsqueezed, centroids)
    avg_score = np.mean(scores.max(1))
    all_avg_scores.append(avg_score)
    new_preds = scores.argmax(1)
    if k > 0:
        # Number of documents whose cluster assignment changed this round.
        change_of_preds = np.sum(preds != new_preds)
        print("change of preds: ", change_of_preds)
    preds = new_preds
    acc = np.sum(preds == labels) / len(labels)
    all_accs.append(acc)
    print("average score: ", avg_score)
    print("after {} iterations of k means accuracy: {}".format(k, acc))
    # Recompute each centroid as the mean of its assigned documents;
    # empty clusters fall back to the global mean of all documents.
    centroids = []
    for i in range(num_cats):
        centroid = text_embeddings[preds == i]
        if centroid.shape[0] > 0:
            centroids.append(centroid.mean(0, keepdims=True))
        else:
            centroids.append(text_embeddings.mean(0, keepdims=True))
    centroids = np.concatenate(centroids, 0)
    # Anchor the centroids halfway back toward the category embeddings.
    centroids = (centroids + cat_embeddings) / 2
    # Converged once no assignment changes (checked after the update,
    # so centroids have already been refreshed for the next round).
    if k > 0 and change_of_preds == 0:
        break

# Report the iteration with the best average max-score, plus the best accuracy seen.
max_index = np.argmax(all_avg_scores)
print("max avg score: ", all_avg_scores[max_index], ", the corresponding accuracy: ", all_accs[max_index], ", max accuracy: ", max(all_accs))
| 2,231 | 24.953488 | 141 | py |
ULR | ULR-main/dual-encoder/cosine/eval_downstream_task.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# All pretrained shortcut names known to the supported config classes,
# used only for the --model_name_or_path help text.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)

# Maps --model_type to (config, model, tokenizer) classes.  Note that
# "bert" and "roberta" use the bare encoders (BertModel / RobertaModel)
# rather than sequence-classification heads: this script scores text
# pairs with a dual encoder, not a classifier.
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed Python, NumPy and torch RNGs from ``args.seed`` (CUDA too if GPUs are used)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def evaluate(args, model, tokenizer, file_path, cat_file_path, prefix=""):
    """Embed every (category, text) pair with the dual encoder and dump results.

    For each batch the two sides are encoded separately; a similarity score is
    computed per pair (dot product or cosine, per ``args.similarity_function``).
    Three files are written to ``args.output_dir``:

    * ``<name>.text.txt``      — one embedding per document,
    * ``<name>.category.txt``  — one embedding per category,
    * ``<name>.preds.txt``     — the (num_docs, num_cats) score matrix.

    Returns an (always empty) results dict, mirroring the transformers
    example-script signature.

    Cleanups vs. the previous version: the dead ``if preds is None`` branch is
    gone (``preds`` starts as a list, so the condition could never be true and
    both arms appended the same value), and the write-only locals ``eval_loss``,
    ``nb_eval_steps`` and ``labels`` were removed.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)

    # Category names are needed only to know how many columns the score matrix has.
    cats = []
    with open(cat_file_path) as fin:
        for line in fin:
            cats.append(line.strip())

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, file_path, cat_file_path)

        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Sequential sampling keeps pairs in (doc x category) order for the reshape below.
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        preds = []
        text_embeddings = []
        category_embeddings = []
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
                inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
                if args.model_type != "distilbert":
                    inputs_a["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                    inputs_b["token_type_ids"] = (
                        batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs_a = model(**inputs_a)
                outputs_b = model(**inputs_b)
                if args.bert_representation == "pool":
                    # Pooled [CLS] output.
                    rep1 = outputs_a[1]
                    rep2 = outputs_b[1]
                elif args.bert_representation == "avg":
                    # Attention-mask-weighted mean over token states (ignores padding).
                    rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
                    rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
                if args.similarity_function == "dot":
                    score = torch.sum(rep1 * rep2, -1)  # unbounded dot product
                elif args.similarity_function == "cosine":
                    score = F.cosine_similarity(rep1, rep2)  # score will be between -1 and 1
                category_embeddings.append(rep1.data.cpu().numpy())
                text_embeddings.append(rep2.data.cpu().numpy())

            preds.append(score.detach().cpu().numpy().reshape((-1)))

        preds = np.concatenate(preds, 0)
        preds = preds.reshape((-1, len(cats)))

        logger.info("save prediction file to eval_output_dir")
        out_file_name = ".".join(file_path.split("/")[-2:])
        text_embeddings = np.concatenate(text_embeddings, 0)
        category_embeddings = np.concatenate(category_embeddings, 0)
        # The dataset pairs every document with every category, so stride by
        # len(cats) for one embedding per document, and take the first len(cats)
        # rows for one embedding per category.
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".text.txt"), text_embeddings[::len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".category.txt"), category_embeddings[:len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".preds.txt"), preds)

    return results
def load_and_cache_examples(args, task, tokenizer, file_path, cat_file_path):
    """Build a ``TensorDataset`` of paired (category, text) encodings for ``task``."""
    if args.local_rank not in [-1, 0] and not evaluate:
        # NOTE(review): ``evaluate`` here is the module-level *function*, which is
        # always truthy, so ``not evaluate`` is always False and this barrier is
        # dead code.  The upstream transformers example passes a boolean
        # ``evaluate`` parameter -- confirm intent before changing.
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = processors[task](cat_file_path=cat_file_path)
    output_mode = output_modes[task]
    # cached_features_file = os.path.join(
    #     args.data_dir,
    #     "cached_{}_{}_{}".format(
    #         file_path,
    #         str(args.max_seq_length),
    #         str(task),
    #     ),
    # )
    # Load data features from cache or dataset file
    # if os.path.exists(cached_features_file) and not args.overwrite_cache:
    #     logger.info("Loading features from cached file %s", cached_features_file)
    #     features = torch.load(cached_features_file)
    # else:
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    # if args.task_name == "nyt":
    if args.multi_class:
        # Multi-label tasks read their labels from a separate file.
        examples = (processor.get_examples(file_path, args.label_filepath))
    else:
        examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    # if args.local_rank in [-1, 0]:
    #     logger.info("Saving features into cached file %s", cached_features_file)
    #     torch.save(features, cached_features_file)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )

    # Convert to Tensors and build dataset.  Models without segment ids
    # (e.g. RoBERTa) leave token_type_ids as None; substitute all-zero rows.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor([f.token_type_ids_a if f.token_type_ids_a is not None else [0]*len(f.attention_mask_a) for f in features], dtype=torch.long)
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor([f.token_type_ids_b if f.token_type_ids_b is not None else [0]*len(f.attention_mask_b) for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)

    dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, all_input_ids_b, all_attention_mask_b, all_token_type_ids_b, all_labels)
    return dataset
def main():
    """Parse CLI arguments, load the dual-encoder checkpoint, and run evaluation."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--eval_data_dir",
        default=None,
        type=str,
        required=True,
        help="The directory containing the evaluation dataset",
    )
    parser.add_argument(
        "--eval_data_file",
        default=None,
        type=str,
        required=True,
        help="The directory containing the evaluation dataset",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--label_filepath",
        default=None,
        type=str,
        help="Path to the label file for the nyt dataset",
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    # NOTE(review): default "pool" is not among choices ["dot", "cosine"] and
    # argparse does not validate defaults, so omitting this flag leaves
    # args.similarity_function == "pool" and neither branch in evaluate()
    # assigns a score (NameError).  Confirm the intended default.
    parser.add_argument(
        "--similarity_function",
        default="pool",
        choices=["dot", "cosine"],
        type=str,
        help="The similarity scoring function",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--multi_class", action="store_true", help="Whether it is a multi class classfication task.")
    parser.add_argument(
        "--all_cats_file", default=None, type=str, help="The file containing all category names",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )

    # If the output dir already holds checkpoints and we are resuming training,
    # point model_name_or_path at the newest checkpoint subdirectory.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    args.output_mode = output_modes[args.task_name]
    num_labels = 2

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        # Tokenizer is reloaded from output_dir (the fine-tuned checkpoint).
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        file_path = os.path.join(args.eval_data_dir, "test.csv")
        # if args.task_name == "nyt":
        if args.all_cats_file is not None:
            all_cats_file = args.all_cats_file
        else:
            all_cats_file = os.path.join(args.eval_data_dir, "classes.txt.acl")
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""

            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            # result = evaluate(args, model, tokenizer, file_path, all_cats_file, prefix=prefix)
            result = evaluate(args, model, tokenizer, args.eval_data_file, all_cats_file, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

    return results
# Script entry point.
if __name__ == "__main__":
    main()
| 22,345 | 39.190647 | 164 | py |
ULR | ULR-main/dual-encoder/cosine/train_natcat.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import torch.nn.functional as F
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Flattened tuple of every pretrained shortcut name across all supported
# model configurations (used only to build the --model_name_or_path help text).
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# --model_type -> (config class, model class, tokenizer class).
# NOTE(review): "bert" and "roberta" map to the bare encoders
# (BertModel/RobertaModel) — consistent with this script's dual-encoder
# training — while the other entries still carry sequence-classification
# heads; confirm those other model types are actually supported by train().
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed every RNG (Python, NumPy, PyTorch CPU/GPU) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain.

    A falsy or non-positive ``save_total_limit`` disables rotation entirely.
    """
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    # Oldest-first ordering from _sorted_checkpoints; trim from the front.
    checkpoints = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    excess = len(checkpoints) - limit
    if excess <= 0:
        return
    for stale in checkpoints[:excess]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=0):
    """Train the dual encoder for one pass over ``train_dataloader`` (one shard).

    Encodes both sides of each pair with the same ``model``, scores them with
    args.similarity_function, and optimizes BCE-with-logits against the 0/1
    pair label. Returns (global_step, tr_loss / global_step, optimizer,
    scheduler) so the caller can resume across shards.
    """
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args) # Added here for reproductibility
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    if args.similarity_function == "dot":
        crit = torch.nn.BCEWithLogitsLoss()
    elif args.similarity_function == "euclidean":
        # NOTE(review): no criterion is created here and `loss` is never
        # assigned in the euclidean branch below, so that path would raise a
        # NameError at `loss.mean()`. The argparse choices only allow "dot",
        # so this branch is effectively dead code — confirm before enabling.
        pass
    for step, batch in enumerate(epoch_iterator):
        # Skip past any already trained steps if resuming training
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        # Batch layout (see load_and_cache_examples): 0-2 = side A
        # (input_ids, attention_mask, token_type_ids), 3-5 = side B, 6 = label.
        inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
        inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
        labels = batch[6]
        if args.model_type != "distilbert":
            inputs_a["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
            ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
            inputs_b["token_type_ids"] = (
                batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
            ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
        outputs_a = model(**inputs_a)
        outputs_b = model(**inputs_b)
        if args.bert_representation == "pool":
            # Pooled representation (second element of the encoder output).
            rep1 = outputs_a[1]
            rep2 = outputs_b[1] # now the score will be between -1 and 1
        elif args.bert_representation == "avg":
            # Attention-mask-weighted mean over token embeddings; the 1e-8
            # epsilon guards against division by an all-zero mask.
            rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
            rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
        if args.similarity_function == "dot":
            score = torch.sum(rep1 * rep2, -1) # now the score will be between -1 and 1
            loss = crit(score, labels.float())
        elif args.similarity_function == "euclidean":
            # Squared Euclidean distance; see NOTE above — no loss is derived.
            score = torch.sum((rep1 - rep2) * (rep1 - rep2), -1)
            #code.interact(local=locals())
            # if label is 1, we want score to be high, so we negate it
            # if label is 0, we want the score to be low, so we keep the sign
            # loss = 2 - (labels - 0.5) * 2 * cosine_score
            # loss = (1 - 2 * labels) * cosine_score
            # This loss function does not give good model performances
            # loss = labels * 2 - (labels - 0.5) * 2 * (cosine_score + 1)
        if args.n_gpu > 1:
            loss = loss.mean() # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        # Redundant on a scalar loss, but harmless.
        loss = loss.mean()
        if args.fp16:
            # NOTE(review): `amp` is imported inside main()'s fp16 branch, not
            # at module scope — confirm it is visible here when fp16 is on.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        tr_loss += loss.item()
        # Only step the optimizer every gradient_accumulation_steps batches.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step() # Update learning rate schedule
            model.zero_grad()
            global_step += 1
            if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {}
                if (
                    args.local_rank == -1 and args.evaluate_during_training
                ): # Only evaluate when single GPU otherwise metrics may not average well
                    results = evaluate(args, model, tokenizer)
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss
                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))
        if args.max_steps > 0 and global_step > args.max_steps:
            epoch_iterator.close()
            break
    return global_step, tr_loss / global_step, optimizer, scheduler
def evaluate(args, model, tokenizer, prefix=""):
    """Evaluate ``model`` on the task's eval set; return a dict of metrics.

    Metrics are also appended to <output_dir>/<prefix>/eval_results.txt.

    NOTE(review): the load_and_cache_examples call below passes
    ``evaluate=True``, but that function's signature in this file is
    (args, task, tokenizer, file_path) — the call looks stale and would raise
    a TypeError. Also, the model is invoked with a ``labels`` kwarg, which
    the bare BertModel/RobertaModel built in main() does not accept. Confirm
    this code path before using --do_eval / --evaluate_during_training.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across batches on the CPU.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        # NOTE(review): preds are squeezed raw logits, not argmaxed class ids,
        # before being compared against labels in compute_metrics — confirm
        # this matches the intended (regression-style?) metric.
        preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, evaluate=False):
    """Read ``file_path`` with the task's processor and build a TensorDataset.

    Each example contributes both encoder sides:
    (input_ids_a, attention_mask_a, token_type_ids_a,
     input_ids_b, attention_mask_b, token_type_ids_b, label).

    Args:
        args: parsed CLI namespace (uses local_rank and max_seq_length).
        task: task name, a key of ``processors`` / ``output_modes``.
        tokenizer: tokenizer passed to the feature converter.
        file_path: data file to load examples from.
        evaluate: True when loading an evaluation set, which skips the
            distributed-training barrier. Bug fix: this parameter had been
            dropped from the signature, so the old guard ``not evaluate``
            tested the module-level ``evaluate`` *function* (always truthy)
            and the barrier could never fire.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    examples = processor.get_examples(file_path)
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )
    # Convert to Tensors and build the dataset. token_type_ids may be None for
    # models without segment embeddings; substitute all-zero segment ids then.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor([f.token_type_ids_a if f.token_type_ids_a is not None else [0]*len(f.attention_mask_a) for f in features], dtype=torch.long)
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor([f.token_type_ids_b if f.token_type_ids_b is not None else [0]*len(f.attention_mask_b) for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, all_input_ids_b, all_attention_mask_b, all_token_type_ids_b, all_labels)
    return dataset
def main():
    """Entry point: parse CLI args, set up devices/seeds, train the dual
    encoder over sharded data files, and optionally evaluate checkpoints."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    # NOTE(review): the default "pool" is not among the choices and argparse
    # does not validate defaults — if the flag is omitted, train() handles
    # neither branch and `loss` is never defined. Callers must pass
    # --similarity_function dot explicitly; confirm.
    parser.add_argument(
        "--similarity_function",
        default="pool",
        choices=["dot"],
        type=str,
        help="The similarity scoring function",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    # Resume: if the output dir already has content and we are training
    # without --overwrite_output_dir, reload the newest checkpoint from it.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        # NOTE(review): the step suffix stays a *string*, so the reverse sort
        # is lexicographic ("checkpoint-9" beats "checkpoint-10"), and mixing
        # the int -1 fallback with strings would raise TypeError in Python 3.
        # Confirm the latest checkpoint is really selected here.
        all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    # Binary pair label: category matches the article (1) or not (0).
    num_labels = 2
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training: iterate over data shards (*.data files); each call to train()
    # consumes one shard, then a checkpoint (plus shard bookkeeping) is saved.
    if args.do_train:
        datafiles = DataFiles(args.data_dir)
        if os.path.isfile(os.path.join(args.model_name_or_path, "datafiles.txt")):
            datafiles.load(os.path.join(args.model_name_or_path, "datafiles.txt"))
        global_step = 0
        shard_count = 0
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()
        while True:
            todo_file = datafiles.next()
            if not todo_file:
                break
            if args.local_rank == 0:
                torch.distributed.barrier()
            train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, todo_file)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
            if shard_count == 0:  # if this is the first shard, create the optimizer or load from the previous checkpoint
                # Prepare optimizer and schedule (linear warmup and decay)
                no_decay = ["bias", "LayerNorm.weight"]
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                        "weight_decay": args.weight_decay,
                    },
                    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
                ]
                # NOTE(review): the schedule length assumes every remaining
                # shard has as many batches as the first one; it is an
                # approximation of the true total step count.
                t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs * len(datafiles.all_files) # 280 shards of data files in total
                optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
                scheduler = get_linear_schedule_with_warmup(
                    optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
                )
                # Check if saved optimizer or scheduler states exist
                if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
                    os.path.join(args.model_name_or_path, "scheduler.pt")
                ):
                    logger.info("loading optimizer and scheduler from %s", args.model_name_or_path)
                    # Load in optimizer and scheduler states
                    optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
                    scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
            if args.fp16:
                try:
                    from apex import amp
                except ImportError:
                    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
                model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
            # multi-gpu training (should be after apex fp16 initialization)
            if args.n_gpu > 1:
                model = torch.nn.DataParallel(model)
            # Distributed training (should be after apex fp16 initialization)
            if args.local_rank != -1:
                model = torch.nn.parallel.DistributedDataParallel(
                    model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
                )
            # Train!
            logger.info("***** Running training *****")
            logger.info("  Num examples = %d", len(train_dataset))
            logger.info("  Num Epochs = %d", args.num_train_epochs)
            logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
            logger.info(
                "  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size
                * args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
            )
            logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
            logger.info("  Total optimization steps = %d", t_total)
            if shard_count == 0:
                # Check if continuing training from a checkpoint
                if os.path.exists(args.model_name_or_path):
                    # set global_step to global_step of last saved checkpoint from model path
                    try:
                        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
                    except ValueError:
                        global_step = 0
                    # NOTE(review): epochs_trained is computed but never used.
                    epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
                    logger.info("  Continuing training from checkpoint %s", args.model_name_or_path)
                    logger.info("  Continuing training from global step %d", global_step)
            global_step, tr_loss, optimizer, scheduler = train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=global_step)
            logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
            # Save model checkpoint
            output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = (
                model.module if hasattr(model, "module") else model
            )  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, "training_args.bin"))
            logger.info("Saving model checkpoint to %s", output_dir)
            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            # Persist the remaining shard list so a restart skips done shards.
            datafiles.save(os.path.join(output_dir, "datafiles.txt"))
            logger.info("Saving optimizer and scheduler states to %s", output_dir)
            _rotate_checkpoints(args, "checkpoint")
            shard_count += 1
        if args.local_rank in [-1, 0]:
            tb_writer.close()
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation: optionally score every saved checkpoint.
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Script entry point.
if __name__ == "__main__":
    main()
| 34,036 | 41.813836 | 177 | py |
ULR | ULR-main/single-encoder/utils.py | import os
from transformers.data.processors.utils import DataProcessor, InputExample
from tqdm import tqdm
import random
import code
class NatcatProcessor(DataProcessor):
    """Processor for the Natcat data set."""

    def __init__(self):
        super(NatcatProcessor, self).__init__()

    def get_examples(self, filepath):
        """Create InputExamples from a TSV file of article/category rows.

        Each line is laid out as: positive category, zero or more negative
        categories, then the article text, tab-separated. The positive
        category yields a label-'1' example against the article; each
        negative category yields a label-'0' example.
        """
        examples = []
        guid = 0
        with open(filepath) as fin:
            rows = fin.read().strip().split("\n")
        for row in tqdm(rows):
            fields = row.strip().split("\t")
            pos_cats = fields[:1]
            neg_cats = fields[len(pos_cats):-1]
            article = fields[-1]
            for cat in pos_cats:
                examples.append(InputExample(guid=guid, text_a=cat, text_b=article, label='1'))
                guid += 1
            for cat in neg_cats:
                examples.append(InputExample(guid=guid, text_a=cat, text_b=article, label='0'))
                guid += 1
        return examples

    def get_labels(self):
        """String class labels: '0' = negative pair, '1' = positive pair."""
        return ["0", "1"]
def get_labels(self):
"""See base class."""
return ["0", "1"]
class EvalProcessor:
    """Builds one (category, text) InputExample per known category for each
    line of a CSV-style evaluation file, labeling the gold category 1 and
    every other category 0."""

    def __init__(self, cat_file_path):
        super(EvalProcessor, self).__init__()
        # One category name per line of the category file.
        with open(cat_file_path) as fin:
            self.cats = [row.strip() for row in fin.read().strip().split("\n")]

    def get_examples(self, filepath):
        """Return InputExamples for every (category, document) pair.

        Each input line looks like ``<1-based label>,"<document>"``; the
        label field may itself be quoted. The document is stripped of its
        surrounding quote characters, truncated to its first 128 whitespace
        tokens, and replaced by "N/A" when empty.
        """
        examples = []
        guid = 0
        with open(filepath) as fin:
            rows = fin.read().strip().split("\n")
        for row in tqdm(rows):
            label_field, text_field = row.strip().split(",", 1)
            if label_field.startswith(("'", '"')):
                label_field = label_field[1:-1]
            gold = int(label_field) - 1
            text = " ".join(text_field[1:-1].split()[:128])
            if text.strip() == "":
                text = "N/A"
            gold_cat = self.cats[gold]
            for cat in self.cats:
                guid += 1
                label = 1 if cat == gold_cat else 0
                examples.append(InputExample(guid=guid, text_a=cat, text_b=text, label=label))
        return examples

    def get_labels(self):
        """Binary relevance labels."""
        return [0, 1]
# Registry: task name -> processor class that parses that task's data files.
processors = {
    "natcat": NatcatProcessor,
    "eval": EvalProcessor,
}
# Output mode per task; every task here is treated as classification.
output_modes = {
    "natcat": "classification",
    "eval": "classification",
}
def simple_accuracy(preds, labels):
    """Element-wise accuracy: fraction of positions where preds == labels.

    Both arguments are expected to be NumPy arrays of equal shape.
    """
    matches = preds == labels
    return matches.mean()
def compute_metrics(task_name, preds, labels):
    """Return {"acc": accuracy} for tasks scored by accuracy, else None."""
    if task_name in ("wikicat", "eval"):
        return {"acc": simple_accuracy(preds, labels)}
    # Unknown task names fall through and return None (original behavior).
class DataFiles:
    """Tracks which training-shard files (*.data) remain to be processed.

    ``all_files`` is the full, stable list of shards found in the directory;
    ``todo_files`` is the mutable work queue consumed by next().

    Bug fix: ``todo_files`` used to alias ``all_files`` (same list object),
    so every next() pop also shrank ``all_files`` — corrupting consumers such
    as the LR-schedule sizing that reads ``len(datafiles.all_files)``. The
    queue is now an independent copy, which also matches load()'s behavior of
    rebinding ``todo_files`` to a fresh list.
    """

    def __init__(self, directory):
        self.all_files = [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith(".data")]
        # Work on a copy so popping from the queue leaves all_files intact.
        self.todo_files = list(self.all_files)

    def next(self):
        """Pop and return the next shard path, or None when exhausted."""
        if len(self.todo_files) == 0:
            return None
        return self.todo_files.pop()

    def save(self, file_path):
        """Persist the remaining shard paths, one per line."""
        with open(file_path, "w") as fout:
            for f in self.todo_files:
                fout.write(f + "\n")

    def load(self, file_path):
        """Replace the work queue with shard paths read from ``file_path``."""
        self.todo_files = []
        with open(file_path) as fin:
            for f in fin:
                self.todo_files.append(f.strip())
| 3,716 | 29.975 | 154 | py |
ULR | ULR-main/single-encoder/compute_acc_kmeans.py | import numpy as np
import sys
from sklearn.metrics import f1_score
from scipy.spatial import distance
from scipy.special import softmax, kl_div
import code
# CLI validation. Bug fix: the usage string previously said
# "preds_file test_file", but the code reads the *test* file from argv[1]
# and the predictions file from argv[2]; the message now matches the code.
if len(sys.argv) != 3:
    print("Usage: python compute_acc.py test_file preds_file")
    sys.exit(-1)
test_file = sys.argv[1]
preds_file = sys.argv[2]
# Gold labels: each line is "<label>,<text>"; labels may be quoted and are
# 1-based, so shift them to 0-based category indices.
with open(test_file) as fin:
    labels = []
    for line in fin:
        label, _ = line.split(",", 1)
        if label.startswith("'") or label.startswith('"'):
            label = label[1:-1]
        labels.append(int(label) - 1)
labels = np.array(labels)
# Raw per-category scores, one row per instance; keep both the softmaxed
# distributions (for the k-means refinement below) and the argmax predictions.
preds = np.loadtxt(preds_file)
num_cats = preds.shape[1]
num_ins = preds.shape[0]
probs = softmax(preds, axis=1)
preds = preds.argmax(1)
acc = np.sum(preds == labels) / len(labels)
print("accuracy of {}: {}".format(preds_file, acc))
def kl_divergence(p, q):
    """KL divergence summed over the last axis, treating 0*log(0) terms as 0."""
    contributions = np.where(p != 0, p * np.log(p / q), 0)
    return np.sum(contributions, -1)
# K-means-style refinement of the classifier's probability vectors:
# centroids start as one-hot distributions (one per category) and are
# iteratively re-estimated from the instances assigned to them, using
# Jensen-Shannon distance between probability distributions.
centroids = np.zeros((num_cats, num_cats))
cat_embeddings = np.zeros((num_cats, num_cats))  # NOTE(review): written but never read below
for i in range(num_cats):
    centroids[i][i] = 1
    cat_embeddings[i][i] = 1
all_avg_scores = []
all_accs = []
for k in range(100):  # hard cap of 100 refinement iterations
    old_preds = preds.copy()
    # Pairwise JS distance between every instance distribution and centroid.
    scores = np.zeros((num_ins, num_cats))
    for i in range(num_ins):
        for j in range(num_cats):
            scores[i, j] = distance.jensenshannon(probs[i], centroids[j])
    # Reassign each instance to its nearest centroid.
    preds = scores.argmin(1)
    avg_score = np.mean(scores.min(1))
    all_avg_scores.append(avg_score)
    acc = np.sum(preds == labels) / len(labels)
    all_accs.append(acc)
    num_updates = np.sum(preds != old_preds)
    print("iteration: ", k, ", accuracy: ", acc, ", number of updates: ", num_updates)
    # Converged once assignments stop changing (skip the check on the
    # first pass, whose "old" preds came from plain argmax).
    if num_updates == 0 and k > 0:
        break
    # Re-estimate each centroid as the mean distribution of its members;
    # an empty cluster keeps its previous centroid.
    new_centroids = []
    for j in range(num_cats):
        new_centroid = probs[preds == j]
        if len(new_centroid) == 0:
            new_centroid = centroids[j][None, :]
        else:
            new_centroid = new_centroid.mean(0, keepdims=True)
        new_centroids.append(new_centroid)
    centroids = np.concatenate(new_centroids, axis=0)
| 2,057 | 26.078947 | 86 | py |
ULR | ULR-main/single-encoder/eval_downstream_task.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Flattened tuple of every pretrained shortcut name across the supported
# config classes (used only in CLI help text).
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# --model_type CLI value -> (config class, model class, tokenizer class).
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and CUDA, if GPUs are used)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain."""
    limit = args.save_total_limit
    if not limit or limit <= 0:
        # No limit configured: keep everything.
        return
    sorted_ckpts = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    excess = len(sorted_ckpts) - limit
    if excess <= 0:
        return
    for stale in sorted_ckpts[:excess]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def evaluate(args, model, tokenizer, file_path, cat_file_path, prefix=""):
    """Score every (category, document) pair and save a prediction matrix.

    Writes a (num_documents, num_categories) matrix of match-vs-mismatch
    logit margins to "<task>.preds.txt" under the output dir. Metrics are
    computed offline from that file, so the returned dict is always empty.
    """
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    # One category description per line; their count fixes the width of
    # the final prediction matrix.
    cats = []
    with open(cat_file_path) as fin:
        for line in fin:
            cats.append(line.strip())
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, file_path, cat_file_path)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Sequential sampling keeps the (document x category) pair order,
        # which the reshape below depends on.
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        # Margin of the "match" logit (column 1) over "mismatch" (column 0)
        # for each pair; one score per (document, category) example.
        preds = preds[:,1] - preds[:,0] # try argmax over pos scores
        eval_loss = eval_loss / nb_eval_steps
        preds = np.squeeze(preds)
        # Rows = documents, columns = categories (pair order from the sampler).
        preds = preds.reshape((-1, len(cats)))
        logger.info("save prediction file to eval_output_dir")
        # The parent directory name of the class file serves as the task name.
        task_name = args.class_file_name.split("/")[-2]
        np.savetxt(os.path.join(eval_output_dir, task_name + ".preds.txt"), preds)
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, cat_file_path):
    """Tokenize the (category, document) pairs in file_path into a TensorDataset.

    Returns a TensorDataset of (input_ids, attention_mask, token_type_ids,
    label) tensors produced by the task's processor and
    glue_convert_examples_to_features.
    """
    # NOTE(review): `evaluate` here is the module-level *function* (always
    # truthy), so `not evaluate` is always False and this barrier never
    # fires. The upstream template took a boolean `evaluate` parameter —
    # confirm the intended behavior before relying on the barrier.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task](cat_file_path=cat_file_path)
    output_mode = output_modes[task]
    # Feature caching is disabled; examples are re-encoded on every run.
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    # Multi-label tasks (e.g. nyt) need a separate label file.
    if args.multi_class:
        examples = (processor.get_examples(file_path, args.label_filepath))
    else:
        examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
        pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
    )
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def main():
    """CLI entry point: load a fine-tuned pair classifier and run evaluation.

    Parses arguments, sets up device/distributed state, loads the
    config/tokenizer/model, then (under --do_eval) scores the evaluation
    file against every category and saves prediction matrices.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--eval_data_dir",
        default=None,
        type=str,
        help="The directory containing the evaluation dataset",
    )
    parser.add_argument(
        "--eval_data_file",
        default=None,
        type=str,
        required=True,
        help="The file containing the evaluation dataset",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--label_filepath",
        default=None,
        type=str,
        help="Path to the label file for the nyt dataset",
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--multi_class", action="store_true", help="Whether it is a multi class classfication task.")
    parser.add_argument("--class_file_name", type=str, default="classes.txt.acl", help="The file containing all class descriptions")
    parser.add_argument("--pred_file_suffix", type=str, default=None, help="Suffix after the prediction file")
    parser.add_argument("--pred_file_prefix", type=str, default=None, help="Prefix before the prediction file")
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        # NOTE(review): the step suffix stays a *string*, so this sort is
        # lexicographic ("999" > "1000") — confirm step counts never cross
        # a digit-length boundary, or cast to int.
        all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    args.output_mode = output_modes[args.task_name]
    # Binary match/no-match head regardless of how many categories exist.
    num_labels = 2
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        # The tokenizer is reloaded from output_dir (the fine-tuned run),
        # replacing the one created from model_name_or_path above.
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        file_path = args.eval_data_file
        all_cats_file = args.class_file_name
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, file_path, all_cats_file, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 22,069 | 38.981884 | 150 | py |
ULR | ULR-main/single-encoder/train_natcat.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Flattened tuple of every pretrained shortcut name across the supported
# config classes (used only in CLI help text).
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# --model_type CLI value -> (config class, model class, tokenizer class).
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and CUDA, if GPUs are used)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain."""
    limit = args.save_total_limit
    if not limit or limit <= 0:
        # No limit configured: keep everything.
        return
    sorted_ckpts = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    excess = len(sorted_ckpts) - limit
    if excess <= 0:
        return
    for stale in sorted_ckpts[:excess]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=0):
    """Run one pass over train_dataloader, updating the model in place.

    Returns (global_step, average loss per optimizer step, optimizer,
    scheduler) so the caller can resume across data shards.

    NOTE(review): the fp16 branches reference `amp`, which is not imported
    in the visible module scope — presumably apex is imported elsewhere
    when --fp16 is set; confirm before enabling fp16.
    NOTE(review): `tr_loss / global_step` raises ZeroDivisionError if the
    loop performs no optimizer step (e.g. empty dataloader with
    global_step=0) — confirm callers guarantee at least one step.
    """
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args)  # Added here for reproductibility
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    for step, batch in enumerate(epoch_iterator):
        # Skip past any already trained steps if resuming training
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if args.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
        outputs = model(**inputs)
        loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
        if args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        tr_loss += loss.item()
        # Only step the optimizer every gradient_accumulation_steps batches.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            global_step += 1
            if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {}
                if (
                    args.local_rank == -1 and args.evaluate_during_training
                ):  # Only evaluate when single GPU otherwise metrics may not average well
                    results = evaluate(args, model, tokenizer)
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss
                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))
        if args.max_steps > 0 and global_step > args.max_steps:
            epoch_iterator.close()
            break
    return global_step, tr_loss / global_step, optimizer, scheduler
def evaluate(args, model, tokenizer, prefix=""):
    """Evaluate the model on the dev set(s) and write eval_results.txt.

    Returns a dict of metric name -> value aggregated over the evaluated
    task(s); MNLI is special-cased into matched/mismatched runs.

    NOTE(review): `load_and_cache_examples` in this file has signature
    (args, task, tokenizer, file_path) — the `evaluate=True` keyword below
    would raise a TypeError if this function is called; confirm whether
    this code path is actually exercised.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, file_path):
    """Tokenize the NatCat examples in file_path into a TensorDataset.

    Returns a TensorDataset of (input_ids, attention_mask, token_type_ids,
    label) tensors produced by the task's processor and
    glue_convert_examples_to_features.
    """
    # NOTE(review): `evaluate` here is the module-level *function* (always
    # truthy), so `not evaluate` is always False and this barrier never
    # fires. The upstream template took a boolean `evaluate` parameter —
    # confirm the intended behavior before relying on the barrier.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    # Feature caching is disabled; examples are re-encoded on every call.
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
        pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
    )
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def main():
    """Parse CLI arguments, then run sharded GLUE-style training and/or
    evaluation with optional distributed and fp16 support.

    Returns:
        dict: evaluation metrics (keys suffixed with the checkpoint's global
        step when evaluating several checkpoints); empty when --do_eval is
        not given.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        # Bug fix: sort by the *numeric* step. The previous string sort ranked
        # "checkpoint-9" above "checkpoint-100", and mixing str steps with the
        # int -1 sentinel raised TypeError under Python 3.
        all_model_checkpoints = [
            (int(ckpt.split("-")[-1]) if "-" in ckpt and ckpt.split("-")[-1].isdigit() else -1, ckpt)
            for ckpt in all_model_checkpoints
        ]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    num_labels = 2
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training: iterate over data shards handed out by DataFiles; the
    # optimizer/scheduler are created (or restored) on the first shard only.
    if args.do_train:
        datafiles = DataFiles(args.data_dir)
        if os.path.isfile(os.path.join(args.model_name_or_path, "datafiles.txt")):
            datafiles.load(os.path.join(args.model_name_or_path, "datafiles.txt"))
        global_step = 0
        shard_count = 0
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()
        while True:
            todo_file = datafiles.next()
            if not todo_file:
                break
            if args.local_rank == 0:
                torch.distributed.barrier()
            train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, todo_file)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
            if shard_count == 0:  # if this is the first shard, create the optimizer or load from the previous checkpoint
                # Prepare optimizer and schedule (linear warmup and decay)
                no_decay = ["bias", "LayerNorm.weight"]
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                        "weight_decay": args.weight_decay,
                    },
                    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
                ]
                t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs * len(datafiles.all_files)  # 280 shards of data files in total
                optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
                scheduler = get_linear_schedule_with_warmup(
                    optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
                )
                # Check if saved optimizer or scheduler states exist
                if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
                    os.path.join(args.model_name_or_path, "scheduler.pt")
                ):
                    logger.info("loading optimizer and scheduler from %s", args.model_name_or_path)
                    # Load in optimizer and scheduler states
                    optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
                    scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
            if args.fp16:
                try:
                    from apex import amp
                except ImportError:
                    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
                model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
            # multi-gpu training (should be after apex fp16 initialization)
            if args.n_gpu > 1:
                model = torch.nn.DataParallel(model)
            # Distributed training (should be after apex fp16 initialization)
            if args.local_rank != -1:
                model = torch.nn.parallel.DistributedDataParallel(
                    model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
                )
            # Train!
            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", len(train_dataset))
            logger.info(" Num Epochs = %d", args.num_train_epochs)
            logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
            logger.info(
                " Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size
                * args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
            )
            logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
            logger.info(" Total optimization steps = %d", t_total)
            if shard_count == 0:
                # Check if continuing training from a checkpoint
                if os.path.exists(args.model_name_or_path):
                    # set global_step to global_step of last saved checkpoint from model path
                    try:
                        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
                    except ValueError:
                        global_step = 0
                    epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
                    logger.info(" Continuing training from checkpoint %s", args.model_name_or_path)
                    logger.info(" Continuing training from global step %d", global_step)
            global_step, tr_loss, optimizer, scheduler = train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=global_step)
            logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
            # Save model checkpoint
            output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = (
                model.module if hasattr(model, "module") else model
            )  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, "training_args.bin"))
            logger.info("Saving model checkpoint to %s", output_dir)
            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            datafiles.save(os.path.join(output_dir, "datafiles.txt"))
            logger.info("Saving optimizer and scheduler states to %s", output_dir)
            _rotate_checkpoints(args, "checkpoint")
            shard_count += 1
        if args.local_rank in [-1, 0]:
            tb_writer.close()
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Standard script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
| 31,833 | 41.902965 | 177 | py |
GlobalSfMpy | GlobalSfMpy-main/setup.py | import os
import re
import subprocess
import sys
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
# Keys are distutils plat_name values; values feed CMake's `-A <arch>` flag
# when an MSVC generator is used (see CMakeBuild.build_extension).
PLAT_TO_CMAKE = {
    "win32": "Win32",
    "win-amd64": "x64",
    "win-arm32": "ARM",
    "win-arm64": "ARM64",
}
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
    """A setuptools Extension whose sources live in a CMake project.

    The extension itself carries no source files; ``sourcedir`` points at
    the directory holding the top-level CMakeLists.txt, and the actual
    compilation is delegated to the CMakeBuild command.
    """

    def __init__(self, name, sourcedir=""):
        # No sources for setuptools to compile: CMake produces the binary,
        # setuptools only needs the extension's name.
        super().__init__(name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """setuptools ``build_ext`` command that hands the real build to CMake.

    For each CMakeExtension it runs ``cmake <sourcedir>`` followed by
    ``cmake --build .`` inside a per-extension temp directory, pointing the
    library output directory at the location setuptools expects.
    """
    def build_extension(self, ext):
        """Configure and build one CMakeExtension with the cmake CLI."""
        # Directory where setuptools expects the finished extension module.
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        # required for auto-detection & inclusion of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep
        # DEBUG env var is the fallback when setuptools' own --debug is unset.
        debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
        cfg = "Debug" if debug else "Release"
        # CMake lets you override the generator - we need to check this.
        # Can be set with Conda-Build, for example.
        cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
        # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
        # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
        # from Python.
        cmake_args = [
            f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
            f"-DPYTHON_EXECUTABLE={sys.executable}",
            f"-DCMAKE_BUILD_TYPE={cfg}",  # not used on MSVC, but no harm
        ]
        build_args = []
        # Adding CMake arguments set as environment variable
        # (needed e.g. to build for ARM OSx on conda-forge)
        if "CMAKE_ARGS" in os.environ:
            cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
        # In this example, we pass in the version to C++. You might not need to.
        cmake_args += [f"-DEXAMPLE_VERSION_INFO={self.distribution.get_version()}"]
        if self.compiler.compiler_type != "msvc":
            # Using Ninja-build since it a) is available as a wheel and b)
            # multithreads automatically. MSVC would require all variables be
            # exported for Ninja to pick it up, which is a little tricky to do.
            # Users can override the generator with CMAKE_GENERATOR in CMake
            # 3.15+.
            if not cmake_generator or cmake_generator == "Ninja":
                try:
                    import ninja  # noqa: F401
                    ninja_executable_path = os.path.join(ninja.BIN_DIR, "ninja")
                    cmake_args += [
                        "-GNinja",
                        f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}",
                    ]
                except ImportError:
                    # ninja wheel absent: fall back to CMake's default generator.
                    pass
        else:
            # Single config generators are handled "normally"
            single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
            # CMake allows an arch-in-generator style for backward compatibility
            contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
            # Specify the arch if using MSVC generator, but only if it doesn't
            # contain a backward-compatibility arch spec already in the
            # generator name.
            if not single_config and not contains_arch:
                cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
            # Multi-config generators have a different way to specify configs
            if not single_config:
                cmake_args += [
                    f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"
                ]
                build_args += ["--config", cfg]
        if sys.platform.startswith("darwin"):
            # Cross-compile support for macOS - respect ARCHFLAGS if set
            archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
            if archs:
                cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
        # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
        # across all generators.
        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            # self.parallel is a Python 3 only way to set parallel jobs by hand
            # using -j in the build_ext call, not supported by pip or PyPA-build.
            if hasattr(self, "parallel") and self.parallel:
                # CMake 3.12+ only.
                build_args += [f"-j{self.parallel}"]
        build_temp = os.path.join(self.build_temp, ext.name)
        if not os.path.exists(build_temp):
            os.makedirs(build_temp)
        # Configure, then build, in the per-extension temp directory.
        subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=build_temp)
        subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=build_temp)
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
    name="GlobalSfMpy",
    version="0.0.1",
    author="Ganlin Zhang",
    author_email="zgl0315@gmail.com",
    description="Global Structure from Motion library, based on TheiaSfM, enable two-view uncertainty and customized loss function.",
    long_description="Global Structure from Motion library, based on TheiaSfM, enable two-view uncertainty and customized loss function.",
    # All native code is compiled by CMake via the CMakeBuild command above.
    ext_modules=[CMakeExtension("GlobalSfMpy")],
    cmdclass={"build_ext": CMakeBuild},
    # Native extension cannot run from a zipped egg.
    zip_safe=False,
    extras_require={"test": ["pytest>=6.0"]},
    python_requires=">=3.6",
)
| 5,750 | 41.286765 | 138 | py |
GlobalSfMpy | GlobalSfMpy-main/scripts/colmap_database.py |
import sys
import sqlite3
import numpy as np
IS_PYTHON3 = sys.version_info[0] >= 3
MAX_IMAGE_ID = 2**31 - 1
CREATE_CAMERAS_TABLE = """CREATE TABLE IF NOT EXISTS cameras (
camera_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
model INTEGER NOT NULL,
width INTEGER NOT NULL,
height INTEGER NOT NULL,
params BLOB,
prior_focal_length INTEGER NOT NULL)"""
CREATE_DESCRIPTORS_TABLE = """CREATE TABLE IF NOT EXISTS descriptors (
image_id INTEGER PRIMARY KEY NOT NULL,
rows INTEGER NOT NULL,
cols INTEGER NOT NULL,
data BLOB,
FOREIGN KEY(image_id) REFERENCES images(image_id) ON DELETE CASCADE)"""
CREATE_IMAGES_TABLE = """CREATE TABLE IF NOT EXISTS images (
image_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT NOT NULL UNIQUE,
camera_id INTEGER NOT NULL,
prior_qw REAL,
prior_qx REAL,
prior_qy REAL,
prior_qz REAL,
prior_tx REAL,
prior_ty REAL,
prior_tz REAL,
CONSTRAINT image_id_check CHECK(image_id >= 0 and image_id < {}),
FOREIGN KEY(camera_id) REFERENCES cameras(camera_id))
""".format(MAX_IMAGE_ID)
CREATE_TWO_VIEW_GEOMETRIES_TABLE = """
CREATE TABLE IF NOT EXISTS two_view_geometries (
pair_id INTEGER PRIMARY KEY NOT NULL,
rows INTEGER NOT NULL,
cols INTEGER NOT NULL,
data BLOB,
config INTEGER NOT NULL,
F BLOB,
E BLOB,
H BLOB)
"""
CREATE_KEYPOINTS_TABLE = """CREATE TABLE IF NOT EXISTS keypoints (
image_id INTEGER PRIMARY KEY NOT NULL,
rows INTEGER NOT NULL,
cols INTEGER NOT NULL,
data BLOB,
FOREIGN KEY(image_id) REFERENCES images(image_id) ON DELETE CASCADE)
"""
CREATE_MATCHES_TABLE = """CREATE TABLE IF NOT EXISTS matches (
pair_id INTEGER PRIMARY KEY NOT NULL,
rows INTEGER NOT NULL,
cols INTEGER NOT NULL,
data BLOB)"""
CREATE_NAME_INDEX = \
"CREATE UNIQUE INDEX IF NOT EXISTS index_name ON images(name)"
CREATE_ALL = "; ".join([
CREATE_CAMERAS_TABLE,
CREATE_IMAGES_TABLE,
CREATE_KEYPOINTS_TABLE,
CREATE_DESCRIPTORS_TABLE,
CREATE_MATCHES_TABLE,
CREATE_TWO_VIEW_GEOMETRIES_TABLE,
CREATE_NAME_INDEX
])
def image_ids_to_pair_id(image_id1, image_id2):
    """Pack an unordered image-id pair into one canonical pair id.

    The smaller id becomes the high-order component, so the result does not
    depend on the argument order.
    """
    low = min(image_id1, image_id2)
    high = max(image_id1, image_id2)
    return low * MAX_IMAGE_ID + high
def pair_id_to_image_ids(pair_id):
    """Invert image_ids_to_pair_id, returning (image_id1, image_id2).

    Bug fix: uses floor division via divmod so both ids come back as ints.
    Under Python 3 the previous true division (`/`) returned image_id1 as a
    float; int ids remain compatible as dict keys since hash(1) == hash(1.0).
    """
    image_id1, image_id2 = divmod(pair_id, MAX_IMAGE_ID)
    return image_id1, image_id2
def array_to_blob(array):
    """Serialize a numpy array to raw bytes for storage in a BLOB column.

    Fix: `ndarray.tostring()` was deprecated in NumPy 1.19 and removed in
    1.23, and `np.getbuffer` only ever existed on Python 2. `tobytes()` is
    available on every supported NumPy and returns bytes on both Python
    versions, so the IS_PYTHON3 branch is no longer needed.
    """
    return array.tobytes()
def blob_to_array(blob, dtype, shape=(-1,)):
    """Deserialize a BLOB (bytes) back into a numpy array of `dtype`/`shape`.

    Fix: `np.fromstring` is deprecated for binary data; `np.frombuffer` is
    the supported equivalent and was already the Python-2 code path, so both
    branches collapse into one. NOTE: the returned array is a read-only view
    of `blob`; callers that need to mutate it should copy().
    """
    return np.frombuffer(blob, dtype=dtype).reshape(*shape)
class COLMAPDatabase(sqlite3.Connection):
    """sqlite3.Connection subclass for reading/writing COLMAP databases.

    Adds schema-creation helpers plus `add_*` insert helpers for cameras,
    images, keypoints, descriptors, matches and two-view geometries.
    Fix: `np.NaN` (an alias removed in NumPy 2.0) is replaced by `np.nan`
    in the add_image defaults; the value is identical.
    """

    @staticmethod
    def connect(database_path):
        """Open (or create) a COLMAP database at `database_path`."""
        return sqlite3.connect(database_path, factory=COLMAPDatabase)

    def __init__(self, *args, **kwargs):
        super(COLMAPDatabase, self).__init__(*args, **kwargs)
        # Table-creation helpers; each runs the corresponding DDL script.
        self.create_tables = lambda: self.executescript(CREATE_ALL)
        self.create_cameras_table = \
            lambda: self.executescript(CREATE_CAMERAS_TABLE)
        self.create_descriptors_table = \
            lambda: self.executescript(CREATE_DESCRIPTORS_TABLE)
        self.create_images_table = \
            lambda: self.executescript(CREATE_IMAGES_TABLE)
        self.create_two_view_geometries_table = \
            lambda: self.executescript(CREATE_TWO_VIEW_GEOMETRIES_TABLE)
        self.create_keypoints_table = \
            lambda: self.executescript(CREATE_KEYPOINTS_TABLE)
        self.create_matches_table = \
            lambda: self.executescript(CREATE_MATCHES_TABLE)
        self.create_name_index = lambda: self.executescript(CREATE_NAME_INDEX)

    def add_camera(self, model, width, height, params,
                   prior_focal_length=False, camera_id=None):
        """Insert a camera row; returns the (possibly auto-assigned) camera_id."""
        params = np.asarray(params, np.float64)
        cursor = self.execute(
            "INSERT INTO cameras VALUES (?, ?, ?, ?, ?, ?)",
            (camera_id, model, width, height, array_to_blob(params),
             prior_focal_length))
        return cursor.lastrowid

    def add_image(self, name, camera_id,
                  prior_q=np.full(4, np.nan), prior_t=np.full(3, np.nan), image_id=None):
        """Insert an image row with optional pose priors; returns image_id.

        The default arrays are shared across calls but never mutated here.
        """
        cursor = self.execute(
            "INSERT INTO images VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (image_id, name, camera_id, prior_q[0], prior_q[1], prior_q[2],
             prior_q[3], prior_t[0], prior_t[1], prior_t[2]))
        return cursor.lastrowid

    def add_keypoints(self, image_id, keypoints):
        """Insert an N x {2,4,6} float32 keypoint matrix for an image."""
        assert(len(keypoints.shape) == 2)
        assert(keypoints.shape[1] in [2, 4, 6])
        keypoints = np.asarray(keypoints, np.float32)
        self.execute(
            "INSERT INTO keypoints VALUES (?, ?, ?, ?)",
            (image_id,) + keypoints.shape + (array_to_blob(keypoints),))

    def add_descriptors(self, image_id, descriptors):
        """Insert a uint8 descriptor matrix for an image."""
        descriptors = np.ascontiguousarray(descriptors, np.uint8)
        self.execute(
            "INSERT INTO descriptors VALUES (?, ?, ?, ?)",
            (image_id,) + descriptors.shape + (array_to_blob(descriptors),))

    def add_matches(self, image_id1, image_id2, matches):
        """Insert raw feature matches (N x 2 keypoint index pairs)."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            # Pair ids are order-independent; store columns in canonical order.
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        self.execute(
            "INSERT INTO matches VALUES (?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches),))

    def add_two_view_geometry(self, image_id1, image_id2, matches,
                              F=np.eye(3), E=np.eye(3), H=np.eye(3), config=2):
        """Insert geometrically verified matches plus F/E/H matrices."""
        assert(len(matches.shape) == 2)
        assert(matches.shape[1] == 2)
        if image_id1 > image_id2:
            matches = matches[:,::-1]
        pair_id = image_ids_to_pair_id(image_id1, image_id2)
        matches = np.asarray(matches, np.uint32)
        F = np.asarray(F, dtype=np.float64)
        E = np.asarray(E, dtype=np.float64)
        H = np.asarray(H, dtype=np.float64)
        self.execute(
            "INSERT INTO two_view_geometries VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            (pair_id,) + matches.shape + (array_to_blob(matches), config,
             array_to_blob(F), array_to_blob(E), array_to_blob(H)))
def example_usage():
    """Self-test demo: create a scratch COLMAP database at --database_path,
    insert dummy cameras/images/keypoints/matches, read everything back and
    verify it round-trips, then delete the database file.
    """
    import os
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--database_path", default="database.db")
    args = parser.parse_args()
    if os.path.exists(args.database_path):
        print("ERROR: database path already exists -- will not modify it.")
        return
    # Open the database.
    db = COLMAPDatabase.connect(args.database_path)
    # For convenience, try creating all the tables upfront.
    db.create_tables()
    # Create dummy cameras.
    model1, width1, height1, params1 = \
        0, 1024, 768, np.array((1024., 512., 384.))
    model2, width2, height2, params2 = \
        2, 1024, 768, np.array((1024., 512., 384., 0.1))
    camera_id1 = db.add_camera(model1, width1, height1, params1)
    camera_id2 = db.add_camera(model2, width2, height2, params2)
    # Create dummy images.
    image_id1 = db.add_image("image1.png", camera_id1)
    image_id2 = db.add_image("image2.png", camera_id1)
    image_id3 = db.add_image("image3.png", camera_id2)
    image_id4 = db.add_image("image4.png", camera_id2)
    # Create dummy keypoints.
    #
    # Note that COLMAP supports:
    #   - 2D keypoints: (x, y)
    #   - 4D keypoints: (x, y, theta, scale)
    #   - 6D affine keypoints: (x, y, a_11, a_12, a_21, a_22)
    num_keypoints = 1000
    keypoints1 = np.random.rand(num_keypoints, 2) * (width1, height1)
    keypoints2 = np.random.rand(num_keypoints, 2) * (width1, height1)
    keypoints3 = np.random.rand(num_keypoints, 2) * (width2, height2)
    keypoints4 = np.random.rand(num_keypoints, 2) * (width2, height2)
    db.add_keypoints(image_id1, keypoints1)
    db.add_keypoints(image_id2, keypoints2)
    db.add_keypoints(image_id3, keypoints3)
    db.add_keypoints(image_id4, keypoints4)
    # Create dummy matches.
    M = 50
    matches12 = np.random.randint(num_keypoints, size=(M, 2))
    matches23 = np.random.randint(num_keypoints, size=(M, 2))
    matches34 = np.random.randint(num_keypoints, size=(M, 2))
    db.add_matches(image_id1, image_id2, matches12)
    db.add_matches(image_id2, image_id3, matches23)
    db.add_matches(image_id3, image_id4, matches34)
    # Commit the data to the file.
    db.commit()
    # Read and check cameras.
    rows = db.execute("SELECT * FROM cameras")
    camera_id, model, width, height, params, prior = next(rows)
    params = blob_to_array(params, np.float64)
    assert camera_id == camera_id1
    assert model == model1 and width == width1 and height == height1
    assert np.allclose(params, params1)
    camera_id, model, width, height, params, prior = next(rows)
    params = blob_to_array(params, np.float64)
    assert camera_id == camera_id2
    assert model == model2 and width == width2 and height == height2
    assert np.allclose(params, params2)
    # Read and check keypoints.
    keypoints = dict(
        (image_id, blob_to_array(data, np.float32, (-1, 2)))
        for image_id, data in db.execute(
            "SELECT image_id, data FROM keypoints"))
    assert np.allclose(keypoints[image_id1], keypoints1)
    assert np.allclose(keypoints[image_id2], keypoints2)
    assert np.allclose(keypoints[image_id3], keypoints3)
    assert np.allclose(keypoints[image_id4], keypoints4)
    # Read and check matches.
    pair_ids = [image_ids_to_pair_id(*pair) for pair in
                ((image_id1, image_id2),
                 (image_id2, image_id3),
                 (image_id3, image_id4))]
    matches = dict(
        (pair_id_to_image_ids(pair_id),
         blob_to_array(data, np.uint32, (-1, 2)))
        for pair_id, data in db.execute("SELECT pair_id, data FROM matches")
    )
    assert np.all(matches[(image_id1, image_id2)] == matches12)
    assert np.all(matches[(image_id2, image_id3)] == matches23)
    assert np.all(matches[(image_id3, image_id4)] == matches34)
    # Clean up.
    db.close()
    if os.path.exists(args.database_path):
        os.remove(args.database_path)
# Standard script entry point. Fix: dataset-extraction residue
# ("| 10,542 | ... | py |") was fused onto this line, making it a
# syntax error; it has been stripped.
if __name__ == "__main__":
    example_usage()
GlobalSfMpy | GlobalSfMpy-main/scripts/sfm_pipeline.py | import sys
import yaml
import os
import shutil
sys.path.append('../build')
import GlobalSfMpy as sfm
from loss_functions import *
def sfm_with_1dsfm_dataset(flagfile, dataset_path,
                           loss_func_rotation,
                           loss_func_position,
                           rotation_error_type=sfm.RotationErrorType.ANGLE_AXIS,
                           position_error_type=sfm.PositionErrorType.BASELINE,
                           onlyRotationAvg=False):
    """Run the global SfM pipeline on a dataset in 1DSfM format.

    Thin convenience wrapper around sfm_pipeline() with use1DSfM forced on;
    every other argument is forwarded unchanged.
    """
    return sfm_pipeline(
        flagfile,
        dataset_path,
        loss_func_rotation,
        loss_func_position,
        rotation_error_type=rotation_error_type,
        position_error_type=position_error_type,
        onlyRotationAvg=onlyRotationAvg,
        use1DSfM=True,
    )
def sfm_pipeline(flagfile,dataset_path,
                loss_func_rotation,
                loss_func_position,
                rotation_error_type=sfm.RotationErrorType.ANGLE_AXIS,
                position_error_type=sfm.PositionErrorType.BASELINE,
                onlyRotationAvg=False,
                use1DSfM = False):
    """Run the full global structure-from-motion pipeline.

    Parameters
    ----------
    flagfile : str
        Path to the YAML flag/config file consumed by ``sfm.load_1DSFM_config``.
    dataset_path : str
        Dataset root. For the non-1DSfM path it must contain ``two_views.txt``,
        ``images/`` and is used to create a temporary ``database`` directory.
    loss_func_rotation, loss_func_position : sfm.LossFunction
        Robust losses used for rotation averaging and position estimation.
    rotation_error_type, position_error_type
        Error parameterizations forwarded to the estimator.
    onlyRotationAvg : bool
        If True, stop after rotation averaging and return early.
    use1DSfM : bool
        Select the 1DSfM dataset reader instead of the COLMAP-matches reader.

    Returns
    -------
    sfm.Reconstruction
        The estimated reconstruction.
    """
    if(use1DSfM):
        # 1DSfM path: dataset ships pre-computed view graph + covariances.
        options = sfm.ReconstructionBuilderOptions()
        sfm.load_1DSFM_config(flagfile,options)
        reconstruction = sfm.Reconstruction()
        view_graph = sfm.ViewGraph()
        rot_covariances = sfm.MapEdgesCovariance()
        sfm.Read1DSFM(dataset_path,reconstruction,view_graph,rot_covariances)
        reconstruction_builder = sfm.ReconstructionBuilder(options,reconstruction,view_graph)
    else:
        # COLMAP path: build the view graph from exported two-view matches.
        database = sfm.FeaturesAndMatchesDatabase(
            dataset_path+"/database")
        options = sfm.ReconstructionBuilderOptions()
        sfm.load_1DSFM_config(flagfile,options)
        rot_covariances = sfm.MapEdgesCovariance()
        sfm.ReadCovariance(dataset_path,rot_covariances)
        reconstruction_builder = sfm.ReconstructionBuilder(options,database)
        sfm.AddColmapMatchesToReconstructionBuilder(dataset_path+"/two_views.txt",dataset_path+"/images/*.JPG",reconstruction_builder)
        reconstruction_builder.CheckView()
        view_graph = reconstruction_builder.get_view_graph()
        reconstruction = reconstruction_builder.get_reconstruction()
    reconstruction_estimator = sfm.GlobalReconstructionEstimator(options.reconstruction_estimator_options)
    # Step 1. Filter the initial view graph and remove any bad two view geometries.
    # Step 2. Calibrate any uncalibrated cameras.
    reconstruction_estimator.FilterInitialViewGraphAndCalibrateCameras(view_graph,reconstruction)
    # Step 3. Estimate global rotations (covariance-weighted rotation averaging).
    assert(reconstruction_estimator.EstimateGlobalRotationsUncertainty(loss_func_rotation,
                                                            rot_covariances,
                                                            rotation_error_type))
    if(onlyRotationAvg):
        # Early exit: only write the averaged orientations into the
        # reconstruction object and skip translation/structure estimation.
        sfm.SetOrientations(
            reconstruction_estimator.orientations,
            reconstruction
        )
        return reconstruction
    # Step 4. Filter bad rotations.
    reconstruction_estimator.FilterRotations()
    # Step 5. Optimize relative translations.
    reconstruction_estimator.OptimizePairwiseTranslations()
    # Step 6. Filter bad relative translations.
    reconstruction_estimator.FilterRelativeTranslation()
    # Step 7. Estimate global positions.
    assert(reconstruction_estimator.EstimatePosition(loss_func_position,position_error_type))
    # Set the poses in the reconstruction object.
    sfm.SetReconstructionFromEstimatedPoses(
        reconstruction_estimator.orientations,
        reconstruction_estimator.positions,
        reconstruction
    )
    # Always triangulate once, then retriangulate and remove outliers depending
    # on the reconstruction estimator options.
    for i in range(1+reconstruction_estimator.options.num_retriangulation_iterations):
        # Step 8. Triangulate features.
        reconstruction_estimator.EstimateStructure()
        sfm.SetUnderconstrainedAsUnestimated(reconstruction)
        # Do a single step of bundle adjustment where only the camera positions and
        # 3D points are refined. This is only done for the very first bundle
        # adjustment iteration.
        if i == 0 and reconstruction_estimator.options.refine_camera_positions_and_points_after_position_estimation:
            reconstruction_estimator.BundleAdjustCameraPositionsAndPoints()
        # Step 9. Bundle Adjustment.
        reconstruction_estimator.BundleAdjustmentAndRemoveOutlierPoints()
    # Remove the temporary feature/match database created for the COLMAP path.
    if (not use1DSfM) and os.path.exists(dataset_path+"/database"):
        shutil.rmtree(dataset_path+"/database")
    return reconstruction
if __name__ == '__main__':
    # Entry point: run the 1DSfM pipeline configured by a YAML flag file.
    # Usage: python sfm_pipeline.py <flagfile>
    if len(sys.argv) < 2:
        print('Usage: sfm_with_1dsfm_dataset.py <flagfile>')
        sys.exit(1)
    flagfile = sys.argv[1]
    # FIX: use a context manager so the flag file handle is closed after
    # parsing; the original `f = open(...)` leaked the handle for the run.
    with open(flagfile, "r") as f:
        config = yaml.safe_load(f)
    dataset_path = config['1dsfm_dataset_directory']
    output_reconstruction = config['output_reconstruction']
    glog_directory = config['glog_directory']
    glog_verbose = config['v']
    log_to_stderr = config['log_to_stderr']
    sfm.InitGlog(glog_verbose,log_to_stderr,glog_directory)
    position_error_type = sfm.PositionErrorType.BASELINE
    rotation_error_type = sfm.RotationErrorType.ANGLE_AXIS_COVARIANCE
    # Pre-compute per-edge rotation covariances once per dataset (cached on disk).
    if not os.path.exists(dataset_path+"/covariance_rot.txt"):
        sfm.CalcCovariance(dataset_path)
    # MAGSAC-weighted loss for rotation averaging, Huber for positions.
    loss_func = MAGSACWeightBasedLoss(0.02)
    reconstruction = sfm_with_1dsfm_dataset(flagfile,dataset_path,
                                loss_func,HuberLoss(0.1),
                                rotation_error_type,position_error_type,
                                onlyRotationAvg=False)
    # Overwrite any previous output file before writing the new reconstruction.
    if os.path.exists(output_reconstruction):
        os.remove(output_reconstruction)
    sfm.WriteReconstruction(reconstruction, output_reconstruction)
    sfm.StopGlog()
| 6,221 | 41.040541 | 134 | py |
GlobalSfMpy | GlobalSfMpy-main/scripts/loss_functions.py | from math import sqrt,log,atan2,exp,cosh
from pickle import FALSE
from re import S
import sys
sys.path.append('../build')
import GlobalSfMpy as sfm
# For a residual vector with squared 2-norm 'sq_norm', this method
# is required to fill in the value and derivatives of the loss
# function (rho in this example):
#
# out[0] = rho(sq_norm),
# out[1] = rho'(sq_norm),
# out[2] = rho''(sq_norm),
#
# Here the convention is that the contribution of a term to the
# cost function is given by 1/2 rho(s), where
#
# s = ||residuals||^2.
#
# Calling the method with a negative value of 's' is an error and
# the implementations are not required to handle that case.
#
# Most sane choices of rho() satisfy:
#
# rho(0) = 0,
# rho'(0) = 1,
# rho'(s) < 1 in outlier region,
# rho''(s) < 0 in outlier region,
#
# so that they mimic the least squares cost for small residuals.
# Scaling
# -------
# Given one robustifier
# s -> rho(s)
# one can change the length scale at which robustification takes
# place, by adding a scale factor 'a' as follows:
#
# s -> a^2 rho(s / a^2).
#
# The first and second derivatives are:
#
# s -> rho'(s / a^2),
# s -> (1 / a^2) rho''(s / a^2),
class TrivialLoss(sfm.LossFunction):
    """Identity (non-robust) loss: rho(s) = s, rho'(s) = 1, rho''(s) = 0."""

    def __init__(self):
        super().__init__()

    def Evaluate(self, sq_norm, out):
        # Plain least squares: value is the squared norm itself.
        out[0], out[1], out[2] = sq_norm, 1.0, 0.0
class HuberLoss(sfm.LossFunction):
    """Huber robust loss.

    rho(s) = s                  for s <= a^2  (quadratic / inlier region)
    rho(s) = 2 a sqrt(s) - a^2  for s >  a^2  (linear / outlier region)
    """

    def __init__(self, a):
        self.a = a
        self.b = a * a  # threshold on the squared norm
        super().__init__()

    def Evaluate(self, sq_norm, out):
        if sq_norm <= self.b:
            # Inlier region: behaves exactly like least squares.
            out[0] = sq_norm
            out[1] = 1.0
            out[2] = 0.0
        else:
            # Outlier region: cost grows linearly in the residual norm.
            root = sqrt(sq_norm)
            out[0] = 2 * self.a * root - self.b
            # Clamp the first derivative away from zero for solver stability.
            out[1] = max(self.a / root, sys.float_info.min)
            out[2] = -out[1] / (2.0 * sq_norm)
class SoftLOneLoss(sfm.LossFunction):
    """Soft-L1 loss: rho(s) = 2 a^2 (sqrt(1 + s/a^2) - 1).

    Smooth approximation of the L1 cost; quadratic near zero, linear for
    large residuals.
    """

    def __init__(self, a):
        self.a = a
        self.b = a * a
        self.c = 1.0 / self.b  # inverse scale applied to the squared norm
        super().__init__()

    def Evaluate(self, sq_norm, out):
        total = 1.0 + sq_norm * self.c
        root = sqrt(total)
        out[0] = 2.0 * self.b * (root - 1.0)
        # Keep rho' strictly positive so the solver never divides by zero.
        out[1] = max(1.0 / root, sys.float_info.min)
        out[2] = -(self.c * out[1]) / (2.0 * total)
class CauchyLoss(sfm.LossFunction):
    """Cauchy (Lorentzian) loss: rho(s) = a^2 log(1 + s/a^2)."""

    def __init__(self, a):
        self.b = a * a
        self.c = 1.0 / self.b  # reciprocal of the squared scale
        super().__init__()

    def Evaluate(self, sq_norm, out):
        total = 1.0 + sq_norm * self.c
        inv = 1.0 / total
        out[0] = self.b * log(total)
        # Derivative is clamped away from zero for numerical stability.
        out[1] = max(inv, sys.float_info.min)
        out[2] = -self.c * (inv * inv)
class ArctanLoss(sfm.LossFunction):
    """Arctangent loss: rho(s) = a * arctan(s / a).

    The cost saturates at a * pi / 2 for very large residuals, so distant
    outliers contribute a bounded amount.
    """

    def __init__(self, a):
        self.a = a
        self.b = 1 / (a * a)
        super().__init__()

    def Evaluate(self, sq_norm, out):
        denom = 1 + sq_norm * sq_norm * self.b
        inv = 1.0 / denom
        out[0] = self.a * atan2(sq_norm, self.a)
        # First derivative clamped to stay strictly positive.
        out[1] = max(inv, sys.float_info.min)
        out[2] = -2.0 * sq_norm * self.b * (inv * inv)
class TolerantLoss(sfm.LossFunction):
    # Loss function that maps to approximately zero cost in a range around the
    # origin, and reverts to linear in error (quadratic in cost) beyond this range.
    # The tolerance parameter 'a' sets the nominal point at which the
    # transition occurs, and the transition size parameter 'b' sets the nominal
    # distance over which most of the transition occurs. Both a and b must be
    # greater than zero, and typically b will be set to a fraction of a.
    # The slope rho'[s] varies smoothly from about 0 at s <= a - b to
    # about 1 at s >= a + b.
    #
    # The term is computed as:
    #
    #   rho(s) = b log(1 + exp((s - a) / b)) - c0.
    #
    # where c0 is chosen so that rho(0) == 0
    #
    #   c0 = b log(1 + exp(-a / b)
    #
    # This has the following useful properties:
    #
    #   rho(s)  == 0               for s = 0
    #   rho'(s) ~= 0               for s << a - b
    #   rho'(s) ~= 1               for s >> a + b
    #   rho''(s) > 0               for all s
    #
    # In addition, all derivatives are continuous, and the curvature is
    # concentrated in the range a - b to a + b.
    #
    # At s = 0: rho = [0, ~0, ~0].
    def __init__(self,a,b):
        """Store scale parameters; a >= 0 is the tolerance, b > 0 the transition width."""
        self.a = a
        self.b = b
        # NOTE(review): self.c divides by b before the asserts below run, so an
        # invalid b surfaces as a math error rather than an AssertionError.
        self.c = b * log(1 + exp(-a / b))
        assert(a >= 0)
        assert(b > 0)
        super().__init__()
    def Evaluate(self, sq_norm, out):
        """Fill out = [rho, rho', rho''] at s = sq_norm (see class comment)."""
        x = (sq_norm - self.a) / self.b;
        # The basic equation is rho[0] = b ln(1 + e^x). However, if e^x is too
        # large, it will overflow. Since numerically 1 + e^x == e^x when the
        # x is greater than about ln(2^53) for doubles, beyond this threshold
        # we substitute x for ln(1 + e^x) as a numerically equivalent approximation.
        kLog2Pow53 = 36.7; # ln(2^53) ~= 36.7: above this, 1 + e^x == e^x in double precision.
        if (x > kLog2Pow53):
            # Linear regime: rho(s) = s - a - c0 exactly (to double precision).
            out[0] = sq_norm - self.a - self.c;
            out[1] = 1.0;
            out[2] = 0.0;
        else:
            e_x = exp(x);
            out[0] = self.b * log(1.0 + e_x) - self.c;
            # Logistic derivative, clamped strictly positive for the solver.
            out[1] = max(e_x / (1.0 + e_x), sys.float_info.min);
            out[2] = 0.5 / (self.b * (1.0 + cosh(x)));
class TukeyLoss(sfm.LossFunction):
    """Tukey biweight loss.

    rho(s) = a^2/6 * (1 - (1 - s/a^2)^3)  for s <= a^2
    rho(s) = a^2/6                        for s >  a^2

    Residuals beyond the scale a contribute a constant cost and zero
    gradient, i.e. outliers are rejected completely.
    """

    def __init__(self, a):
        self.a_squared = a * a
        super().__init__()

    def Evaluate(self, sq_norm, out):
        if sq_norm > self.a_squared:
            # Outlier region: flat cost, no gradient.
            out[0] = self.a_squared / 6.0
            out[1] = 0.0
            out[2] = 0.0
            return
        # Inlier region.
        u = 1.0 - sq_norm / self.a_squared
        u_sq = u * u
        out[0] = self.a_squared / 6.0 * (1.0 - u_sq * u)
        out[1] = 0.5 * u_sq
        out[2] = -1.0 / self.a_squared * u
class LOneHalfLoss(sfm.LossFunction):
    """L^(1/2) loss (with the usual a^2 rho(s/a^2) scaling convention):

    rho(s)   = 2 |s|^0.5
    rho'(s)  = sign(s) |s|^(-0.5)
    rho''(s) = |s|^(-1.5)

    The derivatives diverge at s -> 0, so for them the squared norm is
    clamped below at 0.01; the value itself uses the unclamped input.
    """

    def __init__(self, a):
        self.a = a
        self.sqrt_a = sqrt(a)
        super().__init__()

    def Evaluate(self, sq_norm, out):
        # Value uses the raw (unclamped) squared norm.
        out[0] = 2.0 * self.a * self.sqrt_a * pow(sq_norm, 0.25)
        # Derivatives are evaluated on a floor of 0.01 to avoid blow-up near 0.
        s = sq_norm if sq_norm >= 0.01 else 0.01
        out[1] = 0.5 * pow(self.a, -1.5) * pow(s, -0.75)
        out[2] = -0.375 * self.a * self.sqrt_a * pow(s, -1.75)
class LTwoLoss(sfm.LossFunction):
    """Scaled quadratic loss: rho(s) = s^2 / (2 a^2).

    Note: the ``sigma2`` constructor argument is accepted only for signature
    compatibility with the other losses in this file and is not used.
    """

    def __init__(self, a, sigma2):
        # sigma2 intentionally ignored (kept for a uniform constructor API).
        self.a_sq = a * a
        super().__init__()

    def Evaluate(self, sq_norm, out):
        # Second derivative is constant, first is linear in s.
        out[2] = 1 / self.a_sq
        out[1] = sq_norm / self.a_sq
        out[0] = sq_norm * sq_norm / (self.a_sq * 2.0)
class GemanMcClureLoss(sfm.LossFunction):
    """Geman-McClure robust loss with scale a and variance parameter sigma2.

    rho(s) = a^2 sigma2 s / (2 (s + a^2 sigma2)); the cost saturates for
    large residuals so outliers have vanishing influence.
    """

    def __init__(self, a, sigma2):
        self.a_sq = a * a
        self.sigma2 = sigma2
        super().__init__()

    def Evaluate(self, sq_norm, out):
        # Shared sub-expressions (grouping matches the closed-form derivatives).
        a2s2 = self.a_sq * self.sigma2
        t = sq_norm / self.a_sq + self.sigma2
        out[0] = a2s2 * sq_norm / (2.0 * (sq_norm + a2s2))
        out[1] = (self.sigma2 ** 2) / (2.0 * t ** 2)
        out[2] = -(self.sigma2 ** 2) / (self.a_sq * t ** 3)
class ComposedLoss(sfm.LossFunction):
    """Composition of two losses: rho(s) = f(g(s)).

    Derivatives follow from the chain rule:
      rho'(s)  = f'(g(s)) g'(s)
      rho''(s) = f''(g(s)) g'(s)^2 + f'(g(s)) g''(s)
    """

    def __init__(self, f: sfm.LossFunction, g: sfm.LossFunction):
        self.f = f
        self.g = g
        super().__init__()

    def Evaluate(self, sq_norm, out):
        inner = [0, 0, 0]  # g and its first two derivatives at sq_norm
        outer = [0, 0, 0]  # f and its first two derivatives at g(sq_norm)
        self.g.Evaluate(sq_norm, inner)
        self.f.Evaluate(inner[0], outer)
        out[0] = outer[0]
        out[1] = outer[1] * inner[1]
        out[2] = outer[2] * inner[1] * inner[1] + outer[1] * inner[2]
class ScaledLoss(sfm.LossFunction):
    """Multiply a wrapped loss by a constant factor.

    If rho is the wrapped robustifier, this computes a * rho(s); the scale
    applies identically to the value and both derivatives.
    """

    def __init__(self, rho: sfm.LossFunction, a):
        self.rho = rho
        self.a = a
        super().__init__()

    def Evaluate(self, sq_norm, out):
        # Delegate to the wrapped loss, then scale all three slots in place.
        self.rho.Evaluate(sq_norm, out)
        for i in range(3):
            out[i] *= self.a
class MAGSACWeightBasedLoss(sfm.LossFunction):
    """Robust loss derived from MAGSAC-style marginalized weights (DoF variant 3).

    Constants (nu3, C3, sigma_quantile3, the stored incomplete-gamma table,
    etc.) come from the GlobalSfMpy extension module; the weight is read
    from a precomputed lookup table indexed by the (scaled) squared residual.
    With ``inverse=True`` the loss is 1/weight, otherwise it is
    (weight(0) - weight).
    """
    def __init__(self,sigma,inverse = False):
        # sigma: maximum noise scale; inverse: select the 1/weight formulation.
        self.sigma_max = sigma
        self.nu = sfm.nu3  # degrees of freedom for this variant
        self.squared_sigma = self.sigma_max * self.sigma_max
        self.squared_sigma_max_2 = 2.0 * self.squared_sigma
        self.cubed_sigma_max = self.squared_sigma*self.sigma_max
        self.dof_minus_one_per_two = (self.nu - 1.0) / 2.0
        self.C_times_two_ad_dof = sfm.C3 * (2**self.dof_minus_one_per_two)
        self.one_over_sigma = self.C_times_two_ad_dof / self.sigma_max
        self.gamma_value = sfm.tgamma(self.dof_minus_one_per_two)
        self.gamma_difference = self.gamma_value - sfm.upper_incomplete_gamma_of_k3
        # weight at zero residual; used as the offset so rho(0) == 0.
        self.weight_zero = self.one_over_sigma * self.gamma_difference
        self.use_weight_inverse = inverse
        super().__init__()
    def Evaluate(self, squared_residual, rho):
        """Fill rho = [value, first, second derivative] at the given squared residual."""
        zero_derivative = False;
        # Saturate residuals beyond the sigma quantile; flag so derivatives
        # are flattened below (the cost is constant past this point).
        if (squared_residual>sfm.sigma_quantile3*sfm.sigma_quantile3*self.squared_sigma):
            squared_residual = sfm.sigma_quantile3*sfm.sigma_quantile3*self.squared_sigma
            zero_derivative = True
        # Index into the precomputed incomplete-gamma table.
        x = round(sfm.precision_of_stored_gamma3 * squared_residual / self.squared_sigma_max_2)
        if sfm.stored_gamma_number3 < x:
            x = sfm.stored_gamma_number3
        # s: residual value corresponding to the (clamped) table index.
        s = x * self.squared_sigma_max_2 / sfm.precision_of_stored_gamma3
        weight = self.one_over_sigma * (sfm.stored_gamma_values3[x] - sfm.upper_incomplete_gamma_of_k3)
        weight_derivative = -self.C_times_two_ad_dof\
            * ((s/self.squared_sigma_max_2)**(self.nu/2 - 1.5))\
            * exp(-s/self.squared_sigma_max_2) / (2*self.cubed_sigma_max)
        # Floor s to avoid division by ~0 in the second derivative below.
        if(s < 1e-7):
            s = 1e-7
        weight_second_derivative = 2.0 * self.C_times_two_ad_dof *\
            ((s/self.squared_sigma_max_2)**(self.nu/2 - 1.5)) *\
            (1.0/self.squared_sigma - (self.nu-3)/s) *\
            exp(-s/self.squared_sigma_max_2) / (8*self.cubed_sigma_max);
        if self.use_weight_inverse:
            # rho(s) = 1/w(s); derivatives by the quotient rule.
            rho[0] = 1.0/weight;
            rho[1] = -1.0/(weight*weight)*weight_derivative;
            rho[2] = 2.0/(weight*weight*weight)*weight_derivative*weight_derivative\
                - weight_second_derivative/(weight*weight);
            if zero_derivative:
                # Saturated region: keep a tiny positive slope for the solver.
                rho[1] = 0.00001;
                rho[2] = 0.0;
        else:
            # rho(s) = w(0) - w(s), so rho(0) == 0 and rho is increasing.
            rho[0] = self.weight_zero-weight
            rho[1] = -weight_derivative
            rho[2] = -weight_second_derivative
            if rho[1] == 0:
                rho[1] = 0.00001;
            if zero_derivative:
                rho[1] = 0.00001;
                rho[2] = 0.0;
class MAGSACWeightBasedLoss4(sfm.LossFunction):
    """MAGSAC weight-based loss, DoF variant 4 (uses nu4/C4/...gamma...4 tables).

    Identical in structure to ``MAGSACWeightBasedLoss`` but reads the
    variant-4 constants from the extension module, and defaults to the
    inverse-weight formulation (``inverse=True``).
    """
    def __init__(self,sigma,inverse = True):
        # sigma: maximum noise scale; inverse: select the 1/weight formulation.
        self.sigma_max = sigma
        self.nu = sfm.nu4  # degrees of freedom for this variant
        self.squared_sigma = self.sigma_max * self.sigma_max
        self.squared_sigma_max_2 = 2.0 * self.squared_sigma
        self.cubed_sigma_max = self.squared_sigma*self.sigma_max
        self.dof_minus_one_per_two = (self.nu - 1.0) / 2.0
        self.C_times_two_ad_dof = sfm.C4 * (2**self.dof_minus_one_per_two)
        self.one_over_sigma = self.C_times_two_ad_dof / self.sigma_max
        self.gamma_value = sfm.tgamma(self.dof_minus_one_per_two)
        self.gamma_difference = self.gamma_value - sfm.upper_incomplete_gamma_of_k4
        # weight at zero residual; offset so rho(0) == 0 in the direct form.
        self.weight_zero = self.one_over_sigma * self.gamma_difference
        self.use_weight_inverse = inverse
        super().__init__()
    def Evaluate(self, squared_residual, rho):
        """Fill rho = [value, first, second derivative] at the given squared residual."""
        zero_derivative = False;
        # Saturate residuals beyond the sigma quantile; derivatives flattened below.
        if (squared_residual>sfm.sigma_quantile4*sfm.sigma_quantile4*self.squared_sigma):
            squared_residual = sfm.sigma_quantile4*sfm.sigma_quantile4*self.squared_sigma
            zero_derivative = True
        # Index into the precomputed incomplete-gamma table.
        x = round(sfm.precision_of_stored_gamma4 * squared_residual / self.squared_sigma_max_2)
        if sfm.stored_gamma_number4 < x:
            x = sfm.stored_gamma_number4
        s = x * self.squared_sigma_max_2 / sfm.precision_of_stored_gamma4
        weight = self.one_over_sigma * (sfm.stored_gamma_values4[x] - sfm.upper_incomplete_gamma_of_k4)
        weight_derivative = -self.C_times_two_ad_dof\
            * ((s/self.squared_sigma_max_2)**(self.nu/2 - 1.5))\
            * exp(-s/self.squared_sigma_max_2) / (2*self.cubed_sigma_max)
        # Floor s to avoid division by ~0 in the second derivative below.
        if(s < 1e-7):
            s = 1e-7
        weight_second_derivative = 2.0 * self.C_times_two_ad_dof *\
            ((s/self.squared_sigma_max_2)**(self.nu/2 - 1.5)) *\
            (1.0/self.squared_sigma - (self.nu-3)/s) *\
            exp(-s/self.squared_sigma_max_2) / (8*self.cubed_sigma_max);
        if self.use_weight_inverse:
            # rho(s) = 1/w(s); derivatives by the quotient rule.
            rho[0] = 1.0/weight;
            rho[1] = -1.0/(weight*weight)*weight_derivative;
            rho[2] = 2.0/(weight*weight*weight)*weight_derivative*weight_derivative\
                - weight_second_derivative/(weight*weight);
            if zero_derivative:
                # Saturated region: keep a tiny positive slope for the solver.
                rho[1] = 0.00001;
                rho[2] = 0.0;
        else:
            # rho(s) = w(0) - w(s), so rho(0) == 0 and rho is increasing.
            rho[0] = self.weight_zero-weight
            rho[1] = -weight_derivative
            rho[2] = -weight_second_derivative
            if rho[1] == 0:
                rho[1] = 0.00001;
            if zero_derivative:
                rho[1] = 0.00001;
                rho[2] = 0.0;
class MAGSACWeightBasedLoss9(sfm.LossFunction):
    """MAGSAC weight-based loss, DoF variant 9 (uses nu9/C9/...gamma...9 tables).

    Identical in structure to ``MAGSACWeightBasedLoss`` but reads the
    variant-9 constants from the extension module.
    """
    def __init__(self,sigma,inverse = False):
        # sigma: maximum noise scale; inverse: select the 1/weight formulation.
        self.sigma_max = sigma
        self.nu = sfm.nu9  # degrees of freedom for this variant
        self.squared_sigma = self.sigma_max * self.sigma_max
        self.squared_sigma_max_2 = 2.0 * self.squared_sigma
        self.cubed_sigma_max = self.squared_sigma*self.sigma_max
        self.dof_minus_one_per_two = (self.nu - 1.0) / 2.0
        self.C_times_two_ad_dof = sfm.C9 * (2**self.dof_minus_one_per_two)
        self.one_over_sigma = self.C_times_two_ad_dof / self.sigma_max
        self.gamma_value = sfm.tgamma(self.dof_minus_one_per_two)
        self.gamma_difference = self.gamma_value - sfm.upper_incomplete_gamma_of_k9
        # weight at zero residual; offset so rho(0) == 0 in the direct form.
        self.weight_zero = self.one_over_sigma * self.gamma_difference
        self.use_weight_inverse = inverse
        super().__init__()
    def Evaluate(self, squared_residual, rho):
        """Fill rho = [value, first, second derivative] at the given squared residual."""
        zero_derivative = False;
        # Saturate residuals beyond the sigma quantile; derivatives flattened below.
        if (squared_residual>sfm.sigma_quantile9*sfm.sigma_quantile9*self.squared_sigma):
            squared_residual = sfm.sigma_quantile9*sfm.sigma_quantile9*self.squared_sigma
            zero_derivative = True
        # Index into the precomputed incomplete-gamma table.
        x = round(sfm.precision_of_stored_gamma9 * squared_residual / self.squared_sigma_max_2)
        if sfm.stored_gamma_number9 < x:
            x = sfm.stored_gamma_number9
        s = x * self.squared_sigma_max_2 / sfm.precision_of_stored_gamma9
        weight = self.one_over_sigma * (sfm.stored_gamma_values9[x] - sfm.upper_incomplete_gamma_of_k9)
        weight_derivative = -self.C_times_two_ad_dof\
            * ((s/self.squared_sigma_max_2)**(self.nu/2 - 1.5))\
            * exp(-s/self.squared_sigma_max_2) / (2*self.cubed_sigma_max)
        # Floor s to avoid division by ~0 in the second derivative below.
        if(s < 1e-7):
            s = 1e-7
        weight_second_derivative = 2.0 * self.C_times_two_ad_dof *\
            ((s/self.squared_sigma_max_2)**(self.nu/2 - 1.5)) *\
            (1.0/self.squared_sigma - (self.nu-3)/s) *\
            exp(-s/self.squared_sigma_max_2) / (8*self.cubed_sigma_max);
        if self.use_weight_inverse:
            # rho(s) = 1/w(s); derivatives by the quotient rule.
            rho[0] = 1.0/weight;
            rho[1] = -1.0/(weight*weight)*weight_derivative;
            rho[2] = 2.0/(weight*weight*weight)*weight_derivative*weight_derivative\
                - weight_second_derivative/(weight*weight);
            if zero_derivative:
                # Saturated region: keep a tiny positive slope for the solver.
                rho[1] = 0.00001;
                rho[2] = 0.0;
        else:
            # rho(s) = w(0) - w(s), so rho(0) == 0 and rho is increasing.
            rho[0] = self.weight_zero-weight
            rho[1] = -weight_derivative
            rho[2] = -weight_second_derivative
            if rho[1] == 0:
                rho[1] = 0.00001;
            if zero_derivative:
                rho[1] = 0.00001;
                rho[2] = 0.0;
GlobalSfMpy | GlobalSfMpy-main/scripts/get_covariance_from_colmap.py | import sys
import yaml
import os
sys.path.append('../build')
import GlobalSfMpy as sfm
# Script body: compute and store per-edge rotation covariances for a COLMAP
# dataset, skipping the work if the cached covariance file already exists.
dataset_name = "facade"
flagfile = "../flags_1dsfm.yaml"
dataset_path = "../datasets/"+dataset_name
# Covariances are cached on disk; nothing to do if they are already there.
if os.path.exists(dataset_path+"/covariance_rot.txt"):
    print("Covariance already exists!")
    exit()
# FIX: open the flag file with a context manager so the handle is closed
# after parsing (the original `f = open(...)` was never closed).
with open(flagfile, "r") as f:
    config = yaml.safe_load(f)
glog_directory = config['glog_directory']
glog_verbose = config['v']
log_to_stderr = False
sfm.InitGlog(glog_verbose,log_to_stderr,glog_directory)
# Build the view graph from the exported COLMAP two-view matches.
database = sfm.FeaturesAndMatchesDatabase(
    dataset_path+"/database")
options = sfm.ReconstructionBuilderOptions()
sfm.load_1DSFM_config(flagfile,options)
reconstruction_builder = sfm.ReconstructionBuilder(options,database)
sfm.AddColmapMatchesToReconstructionBuilder(dataset_path+"/two_views.txt",reconstruction_builder,database)
reconstruction_builder.CheckView()
view_graph = reconstruction_builder.get_view_graph()
reconstruction = reconstruction_builder.get_reconstruction()
# Compute and persist the rotation covariances for later pipeline runs.
sfm.store_covariance_rot(dataset_path,reconstruction,view_graph)
sfm.StopGlog()
| 1,071 | 26.487179 | 106 | py |
GlobalSfMpy | GlobalSfMpy-main/scripts/read_colmap_database.py | from colmap_database import *
import os
import argparse
from scipy.spatial.transform import Rotation
import cv2 as cv
import poselib
# Command-line interface: a single optional dataset root, defaulting to the
# bundled "facade" example dataset.
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", default="../datasets/facade")
args = parser.parse_args()
dataset_path = args.dataset_path
class Pose:
    """A rigid camera pose stored as a quaternion (w, x, y, z) plus a translation."""

    def __init__(self, qw, qx, qy, qz, tx, ty, tz):
        # Quaternion components (scalar-first as read from COLMAP images.txt).
        self.qw = qw
        self.qx = qx
        self.qy = qy
        self.qz = qz
        # Translation components.
        self.tx = tx
        self.ty = ty
        self.tz = tz

    def rotation_matrix(self):
        """Return the 3x3 rotation matrix for this pose.

        scipy expects scalar-last (x, y, z, w) quaternion order, so the
        stored scalar-first components are reordered here.
        """
        return Rotation.from_quat([self.qx, self.qy, self.qz, self.qw]).as_matrix()

    def translation(self):
        """Return the translation as a (3, 1) column vector."""
        return np.array([[self.tx], [self.ty], [self.tz]])
# --- Script body -------------------------------------------------------------
# Reads COLMAP text exports (cameras.txt, images.txt) and the COLMAP SQLite
# database, re-estimates a relative pose per verified image pair with poselib,
# and writes everything to dataset_path/two_views.txt in the format documented
# in the header lines written below.
# Parse cameras.txt: first 3 lines are comments; each row is
# CAMERA_ID MODEL WIDTH HEIGHT fx fy cx cy (PINHOLE parameters).
f_camera = open(dataset_path+"/cameras.txt")
cameras = {}
lines = f_camera.readlines()
for line in lines[3:]:
    words = line.split()
    camera_id = np.uint32(words[0])
    camera_width = np.int32(words[2])
    camera_height = np.int32(words[3])
    fx = np.float64(words[4])
    fy = np.float64(words[5])
    ux = np.float64(words[6])
    uy = np.float64(words[7])
    cameras[camera_id] = [camera_width,camera_height,fx,fy,ux,uy]
f_camera.close()
# Parse images.txt: 4 comment lines, then pairs of lines per image; only the
# even lines (pose line) are used, the odd lines (2D points) are skipped.
f_images = open(dataset_path+"/images.txt")
images = {}
lines = f_images.readlines()[4:]
for i in range(len(lines)):
    if i%2 == 1:
        continue
    words = lines[i].split()
    # Keep only the basename of the image path as the key.
    image_name = words[-1].split("/")[-1]
    camera_id = np.uint32(words[-2])
    qw = np.float64(words[1])
    qx = np.float64(words[2])
    qy = np.float64(words[3])
    qz = np.float64(words[4])
    tx = np.float64(words[5])
    ty = np.float64(words[6])
    tz = np.float64(words[7])
    images[image_name]=[camera_id,Pose(qw,qx,qy,qz,tx,ty,tz)]
f_images.close()
# Open the database.
database_path = dataset_path+"/colmap/database.db"
db = COLMAPDatabase.connect(database_path)
two_view_geometries = db.execute("SELECT * FROM two_view_geometries")
write_path = dataset_path + "/two_views.txt"
f_write = open(write_path,"w")
# Header documenting the three-line-per-pair output format.
f_write.write("# img_name1 image_name2 f1 f2 num_inlier rot[0] rot[1] rot[2] trans[0] trans[1] trans[2]\n"
            +"# features1 [p0x p0y p1x p1y ...]\n"+ "# features2 [p0x p0y p1x p1y ...]\n")
for two_view_geometry in two_view_geometries:
    pair_id = two_view_geometry[0]
    # Skip pairs without verified matches (NULL data blob).
    # NOTE(review): `== None` works here but `is None` is the idiomatic test.
    if two_view_geometry[3]== None:
        continue
    matches = blob_to_array(two_view_geometry[3],np.uint32,(-1,2))
    image_id1,image_id2 = pair_id_to_image_ids(pair_id)
    image_id1 = np.uint32(image_id1)
    # Resolve image names (basenames) for both sides of the pair.
    image1 = db.execute("SELECT name FROM images WHERE image_id={0}".format(image_id1))
    image2 = db.execute("SELECT name FROM images WHERE image_id={0}".format(image_id2))
    image1_name = next(image1)[0]
    image2_name = next(image2)[0]
    image1_name = image1_name.split("/")[-1]
    image2_name = image2_name.split("/")[-1]
    # Fundamental matrix stored by COLMAP (read but unused below).
    F = blob_to_array(two_view_geometry[5],np.float64,(3,3))
    # Load keypoints for both images; only (x, y) columns are kept.
    feature1 = db.execute("SELECT data FROM keypoints WHERE image_id={0}".format(image_id1))
    feature1_cols = db.execute("SELECT cols FROM keypoints WHERE image_id={0}".format(image_id1))
    feature1_cols = next(feature1_cols)[0]
    feature1 = next(feature1)[0]
    feature1 = blob_to_array(feature1,np.float32,(-1,feature1_cols))
    feature2 = db.execute("SELECT data FROM keypoints WHERE image_id={0}".format(image_id2))
    feature2_cols = db.execute("SELECT cols FROM keypoints WHERE image_id={0}".format(image_id2))
    feature2_cols = next(feature2_cols)[0]
    feature2 = next(feature2)[0]
    feature2 = blob_to_array(feature2,np.float32,(-1,feature2_cols))
    feature1 = feature1[:,0:2]
    feature2 = feature2[:,0:2]
    # Gather the matched keypoint coordinates for this pair.
    corrresponding1 = feature1[matches[:,0],:]
    corrresponding2 = feature2[matches[:,1],:]
    camera1 = cameras[images[image1_name][0]]
    camera2 = cameras[images[image2_name][0]]
    pose1 = images[image1_name][1]
    pose2 = images[image2_name][1]
    # Repackage intrinsics into the dict format poselib expects.
    camera1 = {'model': 'PINHOLE', 'width': camera1[0], 'height': camera1[1], 'params': [camera1[2],camera1[3],camera1[4],camera1[5]]}
    camera2 = {'model': 'PINHOLE', 'width': camera2[0], 'height': camera2[1], 'params': [camera2[2],camera2[3],camera2[4],camera2[5]]}
    K1 = np.array([[camera1['params'][0],0.0,camera1['params'][2]],
                   [0.0,camera1['params'][1],camera1['params'][3]],
                   [0,0,1]])
    K2 = np.array([[camera2['params'][0],0.0,camera2['params'][2]],
                   [0.0,camera2['params'][1],camera2['params'][3]],
                   [0,0,1]])
    # Robustly re-estimate the relative pose from the matched points.
    result = poselib.estimate_relative_pose(corrresponding1,corrresponding2,camera1,camera2)
    relative_pose = result[0]
    num_inliers = result[1]["num_inliers"]
    inliers_mask = result[1]["inliers"]
    point1 = corrresponding1[inliers_mask,:]
    point2 = corrresponding2[inliers_mask,:]
    # poselib returns a scalar-first quaternion; scipy wants scalar-last.
    rotation = Rotation.from_quat([relative_pose.q[1],relative_pose.q[2],relative_pose.q[3],relative_pose.q[0]]).as_rotvec()
    translation = relative_pose.t
    # Use the mean of fx and fy as a single focal length per camera.
    f1 = (camera1['params'][0]+camera1['params'][1])/2.0
    f2 = (camera2['params'][0]+camera2['params'][1])/2.0
    f_write.write("{} {} {} {} {} {} {} {} {} {} {}\n".format(image1_name,image2_name,f1,f2,num_inliers,
                                                    rotation[0],rotation[1],rotation[2],
                                                    translation[0],translation[1],translation[2]))
    # Two trailing lines per pair: inlier coordinates in each image.
    point1_string = ""
    point2_string = ""
    for i in range(num_inliers):
        point1_string += "{} {} ".format(point1[i,0],point1[i,1])
        point2_string += "{} {} ".format(point2[i,0],point2[i,1])
    point1_string+="\n"
    point2_string+="\n"
    f_write.write(point1_string)
    f_write.write(point2_string)
f_write.close()
db.close()
GlobalSfMpy | GlobalSfMpy-main/scripts/sfm_with_colmap_feature.py | import sys
import yaml
import os
sys.path.append('../build')
import GlobalSfMpy as sfm
from loss_functions import *
from sfm_pipeline import *
# Script body: run the SfM pipeline on a COLMAP-feature dataset using a
# MAGSAC-weighted rotation loss and a Huber position loss, then write the
# reconstruction to disk.
flagfile = "../flags_1dsfm.yaml"
# FIX: open the flag file with a context manager so the handle is closed
# after parsing (the original `f = open(...)` was never closed).
with open(flagfile, "r") as f:
    config = yaml.safe_load(f)
glog_directory = config['glog_directory']
glog_verbose = config['v']
log_to_stderr = config['log_to_stderr']
sfm.InitGlog(glog_verbose,log_to_stderr,glog_directory)
dataset_name = "facade"
# dataset_path = "../datasets/"+dataset_name
dataset_path = "/home/zhangganlin/Desktop/CVG/final_version_globalsfmpy/datasets/"+dataset_name
colmap_path = dataset_path+"/colmap/images.txt"
output_reconstruction = "../output/"+dataset_name
rotation_error_type = sfm.RotationErrorType.ANGLE_AXIS_COVARIANCE
position_error_type = sfm.PositionErrorType.BASELINE
reconstruction = sfm_pipeline(flagfile,dataset_path,
                        MAGSACWeightBasedLoss(0.02),HuberLoss(0.1),
                        rotation_error_type,position_error_type,
                        onlyRotationAvg=False)
# Overwrite any previous output file before writing the new reconstruction.
if os.path.exists(output_reconstruction):
    os.remove(output_reconstruction)
sfm.WriteReconstruction(reconstruction, output_reconstruction)
sfm.StopGlog()
| 1,233 | 29.85 | 95 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/main.py | import argparse
from os import path, makedirs
from experiments import select_experiment
import torch
import yaml
import os
def create_dir_structure(config):
    """Build the per-experiment output directory layout.

    Maps each of the four output kinds ("ckpt", "config", "generated",
    "log") to ``<base_dir>/<experiment>/<kind>/<project_name>``. If the
    ``DATAPATH`` environment variable is set, every path is additionally
    prefixed with it. Only paths are constructed; nothing is created on disk.

    :param config: dict with keys ``base_dir``, ``experiment``, ``project_name``.
    :return: dict mapping each output kind to its directory path.
    """
    structure = {}
    for kind in ("ckpt", "config", "generated", "log"):
        structure[kind] = path.join(
            config["base_dir"], config["experiment"], kind, config["project_name"]
        )
    # Optional cluster-style data root prefix.
    if "DATAPATH" in os.environ:
        root = os.environ["DATAPATH"]
        structure = {kind: path.join(root, p) for kind, p in structure.items()}
    return structure
def load_parameters(config_name, restart,debug,project_name):
    """Load the run configuration and resolve restart/resume behaviour.

    Reads the YAML config at ``config_name``, sets the project name (forced
    to 'debug' in debug mode), and builds the output directory structure.
    On restart the previously saved config is loaded instead; on a fresh run
    the directories are created and, if an earlier run is detected, the user
    is asked interactively whether to resume it.

    :param config_name: path to the YAML config file.
    :param restart: whether to resume a previous run of this project.
    :param debug: debug mode flag (stored into the returned config).
    :param project_name: unique name of the training run.
    :return: tuple (config dict, dir-structure dict, effective restart flag).
    :raises FileNotFoundError: if restart is requested but no saved config exists.
    """
    with open(config_name,"r") as f:
        cdict = yaml.load(f,Loader=yaml.FullLoader)
    if debug:
        cdict['general']['project_name'] = 'debug'
    else:
        cdict['general']['project_name'] = project_name
    dir_structure = create_dir_structure(cdict["general"])
    saved_config = path.join(dir_structure["config"], "config.yaml")
    if restart:
        # Resuming: the saved config from the earlier run wins over the fresh one.
        if path.isfile(saved_config):
            with open(saved_config,"r") as f:
                cdict = yaml.load(f, Loader=yaml.FullLoader)
        else:
            raise FileNotFoundError("No saved config file found but model is intended to be restarted. Aborting....")
    else:
        # Fresh run: create the output directories first.
        [makedirs(dir_structure[d],exist_ok=True) for d in dir_structure]
        if path.isfile(saved_config) and not debug:
            # An earlier run with this project name exists: ask interactively
            # whether to resume it (y) or overwrite its config (n).
            print(f"\033[93m" + "WARNING: Model has been started somewhen earlier: Resume training (y/n)?" + "\033[0m")
            while True:
                answer = input()
                if answer == "y" or answer == "yes":
                    with open(saved_config,"r") as f:
                        cdict = yaml.load(f, Loader=yaml.FullLoader)
                    restart = True
                    break
                elif answer == "n" or answer == "no":
                    with open(saved_config, "w") as f:
                        yaml.dump(cdict, f, default_flow_style=False)
                    break
                else:
                    print(f"\033[93m" + "Invalid answer! Try again!(y/n)" + "\033[0m")
        else:
            # No previous run (or debug mode): persist the fresh config.
            with open(saved_config, "w") as f:
                yaml.dump(cdict,f,default_flow_style=False)
    cdict['general']['debug'] = debug
    return cdict, dir_structure, restart
# Script entry point: parse CLI arguments, load/merge the configuration,
# pick the compute device(s), and dispatch to the selected experiment's
# train() or test() method.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", type=str,
                        default="config/latent_flow_net.yaml",
                        help="Define config file")
    parser.add_argument('-p','--project_name',type=str,default='ii2v',help='unique name for the training run to be (re-)started.')
    parser.add_argument("-r","--restart", default=False,action="store_true",help="Whether training should be resumed.")
    # NOTE(review): the --debug help text below looks copy-pasted from --restart.
    parser.add_argument("-d", "--debug", default=False, action="store_true", help="Whether training should be resumed.")
    parser.add_argument("--gpu",default=[0], type=int,
                        nargs="+",help="GPU to use.")
    parser.add_argument("-m","--mode",default="train",type=str,choices=["train","test"],help="Whether to start in train or infer mode?")
    parser.add_argument("--test_mode",default="metrics",type=str, choices=["noise_test","metrics","fvd",'diversity','render'], help="The mode in which the test-method should be executed.")
    parser.add_argument("--metrics_on_patches", default=False,action="store_true",help="Whether to run evaluation on patches (if available or not).")
    parser.add_argument("--best_ckpt", default=False, action="store_true",help="Whether to use the best ckpt as measured by LPIPS (otherwise, latest_ckpt is used)")
    args = parser.parse_args()
    # Test mode always behaves like a restart (loads the saved config).
    config, structure, restart = load_parameters(args.config, args.restart or args.mode == "test",args.debug,args.project_name)
    config["general"]["restart"] = restart
    config["general"]["mode"] = args.mode
    # config["general"]["first_stage"] = args.first_stage
    # Single GPU id -> torch.device (CPU fallback); multiple ids -> plain list
    # for DataParallel-style usage downstream.
    if len(args.gpu) == 1:
        gpus = torch.device(
            f"cuda:{int(args.gpu[0])}"
            if torch.cuda.is_available() and int(args.gpu[0]) >= 0
            else "cpu"
        )
        torch.cuda.set_device(gpus)
    else:
        gpus = [int(id) for id in args.gpu]
    mode = config["general"]["mode"]
    config["testing"].update({"best_ckpt": args.best_ckpt})
    if mode == "test" and "testing" in config and "metrics_on_patches" in config["testing"]:
        config["testing"]["metrics_on_patches"] = args.metrics_on_patches
    experiment = select_experiment(config, structure, gpus)
    # start selected experiment
    if mode == "train":
        experiment.train()
    elif mode == "test":
        config["testing"].update({"mode": args.test_mode})
        experiment.test()
    else:
        raise ValueError(f"\"mode\"-parameter should be either \"train\" or \"infer\" but is actually {mode}")
interactive-image2video-synthesis | interactive-image2video-synthesis-main/models/discriminator.py | import torch
from torch import nn
from torch.optim import Adam
import functools
from torch.nn.utils import spectral_norm
import math
import numpy as np
from utils.general import get_member
from models.blocks import SPADE
class GANTrainer(object):
def __init__(self, config, load_fn,logger,spatial_size=128, parallel=False, devices=None, debug=False, temporal=False, sequence_length = None):
self.config = config
self.logger = logger
# disc
self.logger.info("Load discriminator model")
self.temporal = temporal
if self.temporal:
assert sequence_length is not None
self.key = "gan_temp"
self.disc = resnet(config=config[self.key],spatial_size=spatial_size,sequence_length=sequence_length)
self.load_key = "disc_temp"
self.postfix = "temp"
if self.disc.cond:
self.logger.info(f"Using Conditional temporal discriminator.")
else:
self.key = "gan"
self.disc = PatchDiscriminator(self.config[self.key])
self.load_key = "disc_patch"
self.postfix = "patch"
self.cond = self.config[self.key]["conditional"] if self.temporal and "conditional" in self.config[self.key] else False
self.logger.info(f"Number of parameters in discriminator_{self.postfix} is {sum(p.numel() for p in self.disc.parameters())}.")
self.parallel = parallel
self.devices = devices
if self.parallel:
assert self.devices is not None
# load checkpoint if there's any and it is required
disc_ckpt = disc_op_ckpt = None
if self.config["general"]["restart"] and not debug:
disc_ckpt, disc_op_ckpt = load_fn(key=self.load_key)
if disc_ckpt is not None:
self.logger.info(f"Resuming training of discriminator...loading weights.")
self.disc.load_state_dict(disc_ckpt)
if self.parallel:
self.disc = nn.DataParallel(self.disc,device_ids=self.devices)
self.disc.cuda(self.devices[0])
else:
self.disc.cuda()
self.logger.info("Discriminator on gpu!")
# disc optimizer
self.disc_opt = Adam(self.disc.parameters(), lr=self.config["training"]["lr"])
if self.config["general"]["restart"] and disc_op_ckpt is not None:
self.disc_opt.load_state_dict(disc_op_ckpt)
# scheduler for disc optimizer
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
self.disc_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.disc_opt, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
    def train_step(self, x_in_true, x_in_fake, cond=None):
        """Run one discriminator update and compute (but not apply) generator losses.

        The discriminator is optimized in place on real + fake batches
        (hinge or BCE loss, plus an optional gradient penalty on the real
        samples). The adversarial and feature-matching generator losses are
        returned for the caller to backpropagate through the generator.

        Parameters
        ----------
        x_in_true : torch.Tensor
            Batch of real examples.
        x_in_fake : torch.Tensor
            Batch of generated examples, still attached to the generator graph.
        cond : torch.Tensor, optional
            Conditioning input; dropped unless the discriminator is conditional.

        Returns
        -------
        tuple
            ``(out_dict, loss_gen, loss_fmap)`` — scalar logging values, the
            adversarial generator loss, and the feature-matching loss.
        """
        # predict
        cond = cond if self.cond else None
        self.disc.train()
        # if self.parallel:
        #     x_in_true = x_in_true.cuda(self.devices[0])
        #     x_in_fake = x_in_fake.cuda(self.devices[0])
        # set gradient to zero
        self.disc_opt.zero_grad()
        # real examples; requires_grad_ is needed so the gradient penalty can
        # differentiate the predictions w.r.t. the real inputs
        x_in_true.requires_grad_()
        pred_true, _ = self.disc(x_in_true, cond)
        loss_real = get_member(self.disc,"loss")(pred_true, real=True)
        if self.config[self.key]["gp_weight"] > 0:
            # retain the graph: the penalty below differentiates through pred_true again
            loss_real.backward(retain_graph=True)
            # gradient penalty (R1-style, on real samples only)
            loss_gp = get_member(self.disc,"gp")(pred_true, x_in_true).mean()
            gp_weighted = self.config[self.key]["gp_weight"] * loss_gp
            gp_weighted.backward()
        else:
            loss_real.backward()
        # fake examples; detached so the generator receives no gradient here
        pred_fake, _ = self.disc(x_in_fake.detach(),cond)
        loss_fake = get_member(self.disc,"loss")(pred_fake, real=False)
        loss_fake.backward()
        # optimize parameters
        self.disc_opt.step()
        loss_disc = ((loss_real + loss_fake) / 2.).item()
        # loss_gp is only bound when gp_weight > 0; the dict entry is guarded accordingly
        out_dict = {f"loss_disc_{self.postfix}": loss_disc, f"p_true_{self.postfix}": torch.sigmoid(pred_true).mean().item(), f"p_fake_{self.postfix}": torch.sigmoid(pred_fake).mean().item(),
                    f"loss_gp_{self.postfix}": loss_gp.item() if self.config[self.key]["gp_weight"] > 0 else 0 }
        # generator losses: fresh forward passes with x_in_fake *attached* this time
        pred_fake, fmap_fake = self.disc(x_in_fake,cond)
        _, fmap_true = self.disc(x_in_true,cond)
        if get_member(self.disc,"bce_loss"):
            loss_gen = get_member(self.disc,"bce")(pred_fake, torch.ones_like(pred_fake))
        else:
            loss_gen = -torch.mean(pred_fake)
        loss_fmap = get_member(self.disc,"fmap_loss")(fmap_fake, fmap_true)
        # if self.parallel:
        #     loss_fmap = loss_fmap.cuda(self.devices[0])
        #     loss_gen = loss_gen.cuda(self.devices[0])
        return out_dict, loss_gen, loss_fmap
# code taken from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
class PatchDiscriminator(nn.Module):
    """PatchGAN discriminator: classifies overlapping image patches as real/fake.

    Produces a 1-channel logit map together with the per-stage feature maps
    (used for a feature-matching loss). Relevant config keys:
    ``pixel_dynamics`` (6 input channels instead of 3), ``n_layers`` (number of
    downsampling stages), ``bce_loss`` (BCE-with-logits instead of hinge loss),
    and optional ``deep_disc`` / ``deep_layers`` for extra stride-1 stages.
    """

    def __init__(self, config, norm_layer=nn.InstanceNorm2d):
        super().__init__()
        # BatchNorm2d has affine parameters, so convs can drop their bias;
        # InstanceNorm2d (the default) does not, hence use_bias=True then.
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        deep_disc = config.get("deep_disc", False)
        in_channels = 6 if config["pixel_dynamics"] else 3
        n_deep_layers = config["deep_layers"]
        base_width = 64
        n_layers = config["n_layers"]
        self.bce_loss = config["bce_loss"]
        if self.bce_loss:
            self.bce = nn.BCEWithLogitsLoss()
        kernel, pad = 4, 1
        self.layers = nn.ModuleList()
        self.norms = nn.ModuleList()
        self.in_conv = nn.Conv2d(in_channels, base_width, kernel_size=kernel, stride=2, padding=pad)
        self.act_fn = nn.LeakyReLU(0.2, True)
        # stride-2 stages; channel multiplier doubles each stage, capped at 8x
        mult = 1
        for stage in range(1, n_layers):
            prev_mult, mult = mult, min(2 ** stage, 8)
            self.layers.append(nn.Conv2d(base_width * prev_mult, base_width * mult, kernel_size=kernel, stride=2, padding=pad, bias=use_bias))
            self.norms.append(norm_layer(base_width * mult))
        # one final stride-1 stage
        prev_mult, mult = mult, min(2 ** n_layers, 8)
        self.layers.append(nn.Conv2d(base_width * prev_mult, base_width * mult, kernel_size=kernel, stride=1, padding=pad, bias=use_bias))
        self.norms.append(norm_layer(base_width * mult))
        width = base_width * mult
        if deep_disc:
            cap = 1024
            for _ in range(n_deep_layers):
                # extra stride-1 stages to make the original patch discriminator more powerful
                self.layers.append(nn.Conv2d(width, min(cap, width * 2), kernel_size=kernel, stride=1, padding=pad, bias=use_bias))
                self.norms.append(norm_layer(min(cap, width * 2)))
                width = min(cap, width * 2)
        self.out_conv = nn.Conv2d(width, 1, kernel_size=kernel, stride=1, padding=pad)  # 1-channel prediction map

    def forward(self, input, cond=None):
        """Return ``(logit map, list of per-stage feature maps)``; ``cond`` is unused."""
        x = self.act_fn(self.in_conv(input))
        features = []
        for conv, norm in zip(self.layers, self.norms):
            x = self.act_fn(norm(conv(x)))
            features.append(x)
        return self.out_conv(x), features

    def loss(self, pred, real):
        """Discriminator loss for a batch of predictions (BCE or hinge)."""
        if self.bce_loss:
            # vanilla GAN loss
            target = torch.ones_like(pred) if real else torch.zeros_like(pred)
            return self.bce(pred, target)
        # hinge loss
        margin = 1.0 - pred if real else 1.0 + pred
        return torch.mean(torch.nn.ReLU()(margin))

    def gp(self, pred_fake, x_fake):
        """Per-sample squared-gradient penalty of the predictions w.r.t. the inputs."""
        grads = torch.autograd.grad(
            outputs=pred_fake.sum(), inputs=x_fake,
            create_graph=True, retain_graph=True, only_inputs=True
        )[0]
        sq = grads.pow(2)
        assert (sq.size() == x_fake.size())
        return sq.view(x_fake.size(0), -1).sum(1)

    def fmap_loss(self, fmap1, fmap2, loss="l1"):
        """Mean feature-matching distance over corresponding feature maps."""
        total = 0
        for f1, f2 in zip(fmap1, fmap2):
            if loss == "l1":
                total = total + torch.mean(torch.abs(f1 - f2))
            if loss == "l2":
                total = total + torch.mean((f1 - f2) ** 2)
        return total / len(fmap1)
######################################################################################################
###3D-ConvNet Implementation from https://github.com/tomrunia/PyTorchConv3D ##########################
def resnet10(**kwargs):
    """Build a ResNet-10 style 3D discriminator (one BasicBlock per stage)."""
    return ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
def resnet(**kwargs):
    """Build a ResNet-18 style 3D discriminator (two BasicBlocks per stage)."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """Build a ResNet-34 style 3D discriminator ([3, 4, 6, 3] BasicBlocks)."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def conv3x3x3(in_planes, out_planes, stride=1, stride_t=1):
    """Spectrally-normalized 3x3x3 convolution with padding 1 and no bias.

    `stride` is applied spatially (H, W), `stride_t` temporally (T).
    """
    conv = nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=(3, 3, 3),
        stride=[stride_t, stride, stride],
        padding=[1, 1, 1],
        bias=False)
    return spectral_norm(conv)
class BasicBlock(nn.Module):
    """Residual block for the 3D ResNet: two spectral-norm 3x3x3 convs with
    GroupNorm, plus an optional downsampling path for the shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, stride_t=1, downsample=None):
        super(BasicBlock, self).__init__()
        # first conv carries the (spatial/temporal) stride
        self.conv1 = conv3x3x3(inplanes, planes, stride, stride_t)
        self.bn1 = nn.GroupNorm(num_groups=16, num_channels=planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.GroupNorm(num_groups=16, num_channels=planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # shortcut path (projected when shape changes)
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return self.relu(h + shortcut)
class ResNet(nn.Module):
    """3D-ResNet discriminator for video (temporal) inputs.

    Two operating modes, selected by ``config["patch_temp_disc"]``:

    * patch mode: a fixed 4-stage layout, average pooling, and a linear head
      applied per remaining time step; optionally conditioned via SPADE on a
      2-channel conditioning map.
    * non-patch mode: stages are stacked until the temporal dimension shrinks
      to 1, then a 2D conv produces a single-channel prediction map.

    Parameters
    ----------
    block : type
        Residual block class (e.g. ``BasicBlock``).
    layers : list of int
        Number of blocks per stage.
    spatial_size : int
        Spatial resolution of the input frames.
    sequence_length : int
        Number of frames per input clip.
    config : dict
        Discriminator sub-config.
    """
    def __init__(self,
                 block,
                 layers,
                 spatial_size,
                 sequence_length,
                 config):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.bce_loss = config["bce_loss"]
        if self.bce_loss:
            # bugfix: loss() calls self.bce when bce_loss is set, but this module
            # was never instantiated (PatchDiscriminator creates it; ResNet did not),
            # leading to an AttributeError at the first loss computation.
            self.bce = nn.BCEWithLogitsLoss()
        min_spatial_size = int(spatial_size / 8)
        self.max_channels = config["max_channels"] if "max_channels" in config else 256
        # stem: spatial downsampling only (temporal stride 1)
        self.conv1 = spectral_norm(nn.Conv3d(
            3,
            64,
            kernel_size=(3, 7, 7),
            stride=(1, 2, 2),
            padding=(1, 3, 3),
            bias=False))
        self.gn1 = nn.GroupNorm(num_groups=16, num_channels=64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=1)
        self.layers = nn.ModuleList()
        self.patch_temp = config["patch_temp_disc"]
        self.spatio_temporal = config["spatio_temporal"] if "spatio_temporal" in config else False
        if self.patch_temp:
            # fixed 4-stage patch layout; temporal resolution is preserved
            self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
            self.layers.append(self._make_layer(block, 128, layers[1], stride=1, stride_t=1))
            self.layers.append(self._make_layer(block, 128, layers[2], stride=2, stride_t=1))
            self.layers.append(self._make_layer(block, 256, layers[3], stride=2, stride_t=1))
            last_size = int(math.ceil(spatial_size / 16))
            last_duration = 1
            self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
            self.cond = config["conditional"] if "conditional" in config else False
            if self.cond:
                # SPADE conditioning on a 2-channel label map, applied per time step
                self.spade_emb = SPADE(norm_nc=block.expansion * 256, label_nc=2, config=config)
            self.fc = nn.Linear(256 * block.expansion, config["num_classes"], bias=False)
        else:
            spatial_size /= 2
            self.layer1 = self._make_layer(block, 32, layers[0], stride=1)
            n_channels = 64
            if "conditional" in config and config["conditional"]:
                raise ValueError("If non-patch-gan temporal discriminator is used, conditional must not be True!")
            self.cond = False
            n = 0
            # stack stages until the temporal dimension is reduced to 1
            # NOTE(review): layers[n] may index past the end of `layers` for long
            # sequences when n < sequence_length - 1 — verify against callers.
            while sequence_length > 1:
                blocks = layers[n] if n < sequence_length - 1 else layers[-1]
                n_channels = min(2 * n_channels, self.max_channels)
                # stop spatial downsampling once min_spatial_size is reached
                stride = 1 if spatial_size <= min_spatial_size else 2
                spatial_size = int(spatial_size / stride)
                stride_t = 1 if self.spatio_temporal else (2 if sequence_length > 1 else 1)
                self.layers.append(self._make_layer(block, n_channels, blocks, stride=stride, stride_t=stride_t))
                sequence_length = int(math.ceil(sequence_length / 2))
                n += 1
            self.final = nn.Conv2d(n_channels, 1, 3, padding=1)
        print(f"Temporal discriminator has {len(self.layers)} layers")
        # orthogonal init for all 3D convolutions
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                m.weight = nn.init.orthogonal_(m.weight)

    def _make_layer(self, block, planes, blocks, stride=1, stride_t=1):
        """Build one residual stage; adds a projection shortcut when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or stride_t != 1:
            downsample = nn.Sequential(
                spectral_norm(nn.Conv3d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=[3, 3, 3],
                    stride=[stride_t, stride, stride],
                    padding=[1, 1, 1],
                    bias=False)),
                nn.GroupNorm(num_channels=planes * block.expansion, num_groups=16))
        layers = []
        layers.append(block(self.inplanes, planes, stride, stride_t, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, cond=None):
        """Return ``(prediction, per-stage feature maps)`` for a clip ``x``.

        ``cond`` is only used in the conditional patch-temporal mode.
        """
        out = []
        x = self.conv1(x)
        x = self.gn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        out.append(x)
        for n in range(len(self.layers)):
            x = self.layers[n](x)
            out.append(x)
        if self.patch_temp:
            if self.cond:
                # apply SPADE normalization per time step
                x_norm = []
                for i in range(x.size(2)):
                    x_norm.append(self.spade_emb(x[:, :, i], cond))
                x_norm = torch.stack(x_norm, 2)
            else:
                x_norm = x
            x1 = self.avgpool(x_norm)
            # one linear prediction per remaining time step
            output = []
            for i in range(x1.size(2)):
                output.append(self.fc(x1[:, :, i].reshape(x1.size(0), -1)))
            return torch.cat(output, dim=1), out
        else:
            # temporal dim has been reduced to 1 by the stage loop above
            output = self.final(x.squeeze(2))
            return output, out

    def loss(self, pred, real):
        """Discriminator loss for a batch of predictions (BCE or hinge)."""
        if self.bce_loss:
            # vanilla gan loss
            return self.bce(pred, torch.ones_like(pred) if real else torch.zeros_like(pred))
        else:
            # hinge loss
            if real:
                l = torch.mean(torch.nn.ReLU()(1.0 - pred))
            else:
                l = torch.mean(torch.nn.ReLU()(1.0 + pred))
            return l

    def gp(self, pred_fake, x_fake):
        """Per-sample squared-gradient penalty of predictions w.r.t. inputs."""
        batch_size = x_fake.size(0)
        grad_dout = torch.autograd.grad(
            outputs=pred_fake.sum(), inputs=x_fake,
            create_graph=True, retain_graph=True, only_inputs=True
        )[0]
        grad_dout2 = grad_dout.pow(2)
        assert (grad_dout2.size() == x_fake.size())
        reg = grad_dout2.view(batch_size, -1).sum(1)
        return reg

    def fmap_loss(self, fmap1, fmap2, loss="l1"):
        """Mean feature-matching distance over corresponding feature maps."""
        recp_loss = 0
        for idx in range(len(fmap1)):
            if loss == "l1":
                recp_loss += torch.mean(torch.abs((fmap1[idx] - fmap2[idx])))
            if loss == "l2":
                recp_loss += torch.mean((fmap1[idx] - fmap2[idx]) ** 2)
        return recp_loss / len(fmap1)
# return output, out, mu
if __name__ == '__main__':
    ## Smoke test for the 3D conv-net discriminator with dummy input (requires CUDA)
    import os
    # pin the test to a single GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = '7'
    config = {"num_classes": 1, "patch_temp_disc": True,"spatial_size": 128, "bce_loss": False, "conditional": True}
    # dummy video batch: (batch, channels, time, height, width)
    dummy = torch.rand((2, 3, 6, 128, 128)).cuda()
    # 2-channel conditioning map for the SPADE-conditioned head
    dummy_cond = torch.rand((2, 2, 128, 128)).cuda()
    model = resnet(config=config,spatial_size=128, sequence_length=dummy.shape[2]).cuda()
    print("Number of parameters in generator", sum(p.numel() for p in model.parameters()))
    if config["conditional"]:
        out, out2 = model(dummy,dummy_cond)
    else:
        out, out2,= model(dummy)
    test = 1
interactive-image2video-synthesis | interactive-image2video-synthesis-main/models/latent_flow_net.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import math
from models.blocks import Conv2dBlock, ResBlock, AdaINLinear, NormConv2d,ConvGRU
class OscillatorModel(nn.Module):
    """Recurrent image-to-video model for (quasi-)periodic motion.

    A skip-connection encoder extracts multi-scale features of the input
    frame, a second encoder maps the poke (2-channel displacement map) to a
    latent impulse, and a ConvGRU integrates the linearly decaying impulse
    together with the deviation from the target frame's encoding over time;
    each recurrent state is decoded back to an image.
    """
    def __init__(self,spatial_size,config,n_no_motion=2, logger=None):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.reparamterize = config["reparameterize_poke"] if "reparameterize_poke" in config else False
        self.norm_layer = config["norm_layer"] if "norm_layer" in config else "in"
        self.layers = config["layers"]
        self.n_gru_layers = config["n_gru_layers"] if "n_gru_layers" in config else 3
        # number of trailing frames trained to show no motion
        self.n_no_motion = n_no_motion
        self.n_stages = len(self.layers)
        assert self.n_no_motion is not None
        nf_first_shape_enc = int(max(32, config["nf_deep"] / (2 ** self.n_stages)))
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=self.layers[-1], n_stages=self.n_stages, n_skip_stages=self.n_stages
                                               ,nf_first=nf_first_shape_enc, norm_layer=self.norm_layer,layers=self.layers)
        self.dynamics_enc = Encoder(2, nf_max=self.layers[-1], n_stages=self.n_stages,
                                    variational=self.reparamterize, norm_layer=self.norm_layer, layers=self.layers)
        ups = [False] * self.n_gru_layers
        # GRU input is [poke impulse, deviation from target encoding], hence 2x channels
        self.fusion_block = ConvGRU(input_size=self.layers[-1] * 2, hidden_sizes=self.layers[-1], kernel_sizes=3, n_layers=self.n_gru_layers,
                                    upsampling=ups,)
        self.dec = SkipConnectionDecoder(nf_in=self.layers[-1], in_channels=self.shape_enc.depths, n_skip_stages=len(self.layers),
                                         disentanglement=False, norm_layer=self.norm_layer, layers=self.layers)
        if logger is not None:
            logger.info("Constructed OscillatorModel")
            logger.info(f"Layers of OscillatorModel is {self.layers}")
            logger.info(f"Encoder channels of oscillator model is {self.layers}")

    def forward(self,input_img, poke, len,n_ref,target_img=None):
        """Roll out ``len`` frames from ``input_img`` under the given poke.

        Parameters
        ----------
        input_img : torch.Tensor
            Source frame, shape (B, 3, H, W).
        poke : torch.Tensor
            2-channel poke/displacement map.
        len : int
            Number of frames to generate.
        n_ref : int
            Number of steps over which the poke impulse decays linearly to zero.
        target_img : torch.Tensor, optional
            Frame whose encoding the recurrent state is pulled towards;
            defaults to ``input_img``.

        Returns
        -------
        tuple
            ``(imgs, sigmas_hat_out)`` — frames stacked along dim 1 and the
            predicted deep features per time step.
        """
        imgs = []
        sigmas_hat_out = []
        # bugfix: compare against None by identity; `==` on a tensor argument
        # triggers elementwise comparison instead of a null check
        if target_img is None:
            target_img = input_img
        if self.reparamterize:
            delta, mu, _ = self.dynamics_enc(poke)
        else:
            delta = self.dynamics_enc(poke)[0]
        # shape encoding is computed only once, for the first frame
        sigmas = self.shape_enc(input_img)
        sigma_tgt = self.shape_enc(target_img)[-1]
        # deepest feature map is the part that evolves over time
        sigma_dyn = sigmas.pop()
        pred = [sigma_dyn] * self.n_gru_layers
        pred_out = pred[-1]
        for n in range(len):
            # poke impulse decays linearly until n_ref, then is switched off
            delta_in = delta * (1. - float(n)/(n_ref-1)) if n <= n_ref else torch.zeros_like(delta)
            if self.training:
                # during training the last n_no_motion steps get a zero deviation target
                sigma_diff = pred_out - sigma_tgt if n < len - self.n_no_motion else torch.zeros_like(pred_out)
            else:
                sigma_diff = pred_out - sigma_tgt
            delta_in = torch.cat([delta_in,sigma_diff],1)
            # predict object encoding at next time step
            pred = self.fusion_block(delta_in, pred)
            pred_out = pred[-1]
            sigmas.append(pred_out)
            # decode the current multi-scale state to an image
            x = self.dec(sigmas, [], del_shape=False)
            imgs.append(x)
            sigmas_hat_out.append(pred_out)
            # remove the per-step prediction again; the skip features stay fixed
            sigmas.pop()
        imgs = torch.stack(imgs, dim=1)
        return imgs, sigmas_hat_out
class SAVPArchModel(nn.Module):
    """SAVP-style baseline: encodes the poke into a latent code and feeds it,
    together with the source image, to a recurrent SAVP generator."""

    def __init__(self, spatial_size, config):
        super().__init__()
        # downsample until the spatial bottleneck is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.poke_every_t = config["poke_every_t"] if "poke_every_t" in config else True
        self.dynamics_enc = Encoder(nf_in=2, nf_max=64, n_stages=self.n_stages)
        self.gen = SAVPGenerator(self.poke_every_t)

    def forward(self, img, poke, len):
        """Encode the poke and roll out ``len`` frames starting from ``img``."""
        poke_code = self.dynamics_enc(poke)[0]
        return self.gen(img, poke_code, len)
class SAVPGenerator(nn.Module):
    """Recurrent encoder-decoder generator in the style of SAVP.

    Each time step runs the previous frame through a conv/ConvGRU encoder,
    concatenates the poke code at the bottleneck, and decodes back to an
    image with skip connections; the produced frame is fed back as input.
    """
    def __init__(self, poke_every):
        super().__init__()
        # if False, the poke code is only injected at the first time step
        self.poke_every_t = poke_every
        # encoder stuff
        self.conv_e1 = Conv2dBlock(3, 32, 3, 2, norm="in", padding=1, activation="relu")
        # ssize 32
        self.rnn_e1 = ConvGRU(32, 32, 3, 1)
        self.conv_e2 = Conv2dBlock(32, 64, 3, 2, norm="in", padding=1, activation="relu")
        # ssize 16
        self.rnn_e2 = ConvGRU(64, 64, 3, 1)
        # bottleneck
        self.conv_bn = Conv2dBlock(128, 128, 3, 2, norm="in", padding=1, activation="relu")
        # ssize 8
        self.rnn_bn = ConvGRU(128, 64, 3, 1)
        # decoder stuff
        self.up1 = nn.Upsample((16, 16), mode="bilinear")
        # ssize 16
        self.conv_d1 = Conv2dBlock(128, 64, 3, 1, norm="in", padding=1, activation="relu")
        self.rnn_d1 = ConvGRU(64, 32, 3, 1)
        self.up2 = nn.Upsample((32, 32), mode="bilinear")
        # ssize 32
        self.conv_d2 = Conv2dBlock(64, 32, 3, 1, norm="in", padding=1, activation="relu")
        self.rnn_d2 = ConvGRU(32, 32, 3, 1)
        self.up3 = nn.Upsample((64, 64), mode="bilinear")
        self.conv_out = Conv2dBlock(32, 3, 3, 1, 1, norm="none", activation="tanh")

    def forward(self, img, delta, len):
        """Autoregressively generate ``len`` frames from ``img`` and poke code ``delta``.

        The recurrent states (x1er, x2er, ...) are only read for t > 0, after
        they have been assigned, so the conditional expressions below are safe.
        """
        x = img
        out_imgs = []
        for t in range(len):
            x1e = self.conv_e1(x)
            x1er = self.rnn_e1(x1e, [x1er] if t > 0 else None)[0]
            x2e = self.conv_e2(x1er)
            x2er = self.rnn_e2(x2e, [x2er] if t > 0 else None)[0]
            if t > 0 and not self.poke_every_t:
                # bugfix: was np.zeros_like, which returns a numpy array and
                # breaks the torch.cat below (and fails on CUDA tensors)
                delta = torch.zeros_like(delta)
            xbn = torch.cat([x2er, delta], dim=1)
            xbn = self.conv_bn(xbn)
            xbnr = self.rnn_bn(xbn, [xbnr] if t > 0 else None)[0]
            x1d = self.up1(xbnr)
            # skip connection from the matching encoder stage
            x1d = self.conv_d1(torch.cat([x1d, x2er], 1))
            x1dr = self.rnn_d1(x1d, [x1dr] if t > 0 else None)[0]
            x2d = self.up2(x1dr)
            x2d = self.conv_d2(torch.cat([x2d, x1er], 1))
            x2dr = self.rnn_d2(x2d, [x2dr] if t > 0 else None)[0]
            x = self.conv_out(self.up3(x2dr))
            out_imgs.append(x)
        out_imgs = torch.stack(out_imgs, 1)
        return out_imgs
class ForegroundBackgroundModel(nn.Module):
    """Video prediction model with separate foreground/background encodings.

    Only the foreground features are propagated through a ConvGRU under the
    poke; the background features stay fixed and are concatenated back in
    before decoding each frame.
    """
    def __init__(self, spatial_size,config):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        # if True, the poke is concatenated with the foreground image (5 channels total)
        self.cat_poke_img = config["poke_and_img"]
        # if True, the poke is replaced by zeros (ablation baseline)
        self.zeroflow_baseline = config["zeroflow_baseline"]
        self.variational = config["variational"] if "variational" in config else False
        # channel split factor between foreground and background branches
        foreground_background_div = config["foreground_background_div"]
        assert foreground_background_div >= 1.
        nf_first_shape_enc = int(max(32, config["nf_deep"] / (2 ** self.n_stages)))
        if self.variational:
            self.shape_enc = VariationalSkipConnectionEncoderFGBG(nf_in=3,nf_max=config["nf_deep"],n_stages=self.n_stages, n_skip_stages=self.n_stages,
                                                                  nf_first=nf_first_shape_enc)
        else:
            self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages,
                                                   n_skip_stages=self.n_stages, nf_first=nf_first_shape_enc,
                                                   fg_bg=True, div=foreground_background_div)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        # GRU hidden sizes only cover the foreground share of the channels
        hidden_sizes = [int(config["nf_deep"]/foreground_background_div)] + [int(d/foreground_background_div) for d in self.shape_enc.depths]
        ups = [False] + self.shape_enc.downs
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=hidden_sizes, kernel_sizes=3,
                                    n_layers=self.n_stages + 1, upsampling=ups)
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths,
                                         n_skip_stages=self.n_stages, disentanglement=False)

    def forward(self,fg_img,bg_img,poke,len):
        """Generate ``len`` frames (or reconstruct when ``len`` <= 0).

        Returns ``(imgs, sigmas_fg, sigmas_bg, mus, logstds)``; the latter two
        are only populated in the variational reconstruction path.
        """
        x = fg_img
        mus = logstds = None
        if len > 0:
            # --- sequence generation path ---
            if self.zeroflow_baseline:
                poke = torch.zeros_like(poke)
            if self.cat_poke_img:
                poke = torch.cat([poke, x], dim=1)
            imgs = []
            sigmas_fg = []
            sigmas_bg = []
            # infer dynamics input
            delta = self.dynamics_enc(poke)[0]
            # only first time shape encoding
            sigma_n = self.shape_enc(x)[0]
            sigma_bg = self.shape_enc(bg_img)[1]
            for n in range(len):
                # apply fusion block: input is delta, hidden states are the sigma_n
                # (the GRU expects deepest-first ordering, hence the reverses)
                sigma_n.reverse()
                sigma_n = self.fusion_block(delta, sigma_n)
                sigma_n.reverse()
                # rejoin the static background features before decoding
                sigma_cat = [torch.cat([sfg,sbg],dim=1) for sfg,sbg in zip(sigma_n,sigma_bg)]
                x = self.dec(sigma_cat, None, del_shape=True)
                imgs.append(x)
                # output foreground representation
                # NOTE(review): the same list object is appended each step — entries alias
                sigmas_fg.append(sigma_n)
                # out
                sigmas_bg.append(sigma_bg)
            imgs = torch.stack(imgs, dim=1)
            sigmas_fg[-1].reverse()
        else:
            # --- single-frame reconstruction path ---
            if self.variational:
                sigmas_fg, sigmas_bg1, mus, logstds = self.shape_enc(x)
                _, sigmas_bg2, *_ = self.shape_enc(bg_img)
            else:
                sigmas_fg, sigmas_bg1 = self.shape_enc(x)
                _, sigmas_bg2 = self.shape_enc(bg_img)
            sigmas_bg = (sigmas_bg1,sigmas_bg2)
            sigmas = [torch.cat([sfg,sbg],dim=1) for sfg,sbg in zip(sigmas_fg,sigmas_bg2)]
            imgs = self.dec(sigmas, None, del_shape=True)
            sigmas_fg.reverse()
        return imgs, sigmas_fg, sigmas_bg, mus, logstds
class SkipSequenceModel(nn.Module):
    """Main skip-connection video prediction model.

    A skip-connection encoder extracts multi-scale features of the source
    frame, a poke encoder produces a latent impulse, and a multi-scale ConvGRU
    propagates all feature levels over time; each state is decoded to a frame.
    """
    def __init__(self, spatial_size, config, n_no_motion=None):
        super().__init__()
        # number of downsampling layers; always such that the spatial bottleneck
        # is min_spatial_size x min_spatial_size
        self.min_spatial_size = config["min_spatial_size"] if "min_spatial_size" in config else 16
        self.n_stages = int(np.log2(spatial_size[0] // self.min_spatial_size))
        print(f"number of stages in model is {self.n_stages}")
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        # if True, the poke is concatenated with the appearance image (5 channels)
        self.cat_poke_img = config["poke_and_img"]
        # if True, the poke is replaced by zeros (ablation baseline)
        self.zeroflow_baseline = config["zeroflow_baseline"]
        # if False, the poke is only injected at the first time step
        self.poke_every_t = config["poke_every_t"] if "poke_every_t" in config else True
        use_spectral_norm = config["spectnorm_decoder"] if "spectnorm_decoder" in config else False
        self.reparamterize = config["reparameterize_poke"] if "reparameterize_poke" in config else False
        self.norm_layer = config["norm_layer"] if "norm_layer" in config else "in"
        self.layers = config["layers"] if "layers" in config and len(config["layers"]) > 0 else None
        self.poke_scale_mode = config["poke_scale"] if "poke_scale" in config else False
        nf_first_shape_enc = int(max(32, config["nf_deep"] / (2 ** self.n_stages)))
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=self.n_stages
                                               ,nf_first=nf_first_shape_enc, norm_layer=self.norm_layer,layers=self.layers)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages,
                                    variational=self.reparamterize, norm_layer=self.norm_layer, layers=self.layers)
        # one GRU level per encoder scale plus the bottleneck
        hidden_sizes = [config["nf_deep"]] + self.shape_enc.depths
        ups = [False] + self.shape_enc.downs
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=hidden_sizes, kernel_sizes=3, n_layers=self.n_stages+1 if self.layers is None else len(self.layers)+1,
                                    upsampling=ups,)
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths, n_skip_stages=self.n_stages if self.layers is None else len(self.layers),
                                         disentanglement=False, spectral_norm=use_spectral_norm, norm_layer=self.norm_layer, layers=self.layers)

    def forward(self, app_img, shape_img, poke, len, poke_linear=False, delta_scaling=None, n_zero_frames=0, invert_poke=False, poke_jump=False):
        """Generate ``len`` frames from ``shape_img`` (or reconstruct when ``len`` <= 0).

        Parameters
        ----------
        app_img : torch.Tensor
            Appearance image; concatenated with the poke when ``poke_and_img`` is set.
        shape_img : torch.Tensor
            Source frame that is encoded and propagated.
        poke : torch.Tensor
            2-channel poke/displacement map.
        len : int
            Number of frames to generate; <= 0 triggers single-frame reconstruction.
        poke_linear : bool
            In poke-scale mode, decay the poke linearly instead of using ``delta_scaling``.
        delta_scaling : sequence, optional
            Per-step poke scale factors (poke-scale mode, non-linear variant).
        n_zero_frames : int
            Number of trailing steps with zero poke.
        invert_poke : bool
            Linearly decay then invert the poke over the sequence.
        poke_jump : bool
            Inject the constant poke until the last ``n_zero_frames`` steps.

        Returns
        -------
        tuple
            ``(imgs, sigmas_out, sigmas_hat_out, [])``.
        """
        x = shape_img
        if len > 0:
            # --- sequence generation path ---
            if self.zeroflow_baseline:
                poke = torch.zeros_like(poke)
            if self.cat_poke_img:
                poke = torch.cat([poke, app_img], dim=1)
            imgs = []
            sigmas_hat_out = []
            # infer dynamics input
            if self.reparamterize:
                delta, mu, _ = self.dynamics_enc(poke)
            else:
                delta = self.dynamics_enc(poke)[0]
            sigmas_out = delta
            # shape encoding is computed only once, for the first frame
            sigma_n = self.shape_enc(x)
            for n in range(len):
                # the GRU expects deepest-first ordering, hence the reverses
                sigma_n.reverse()
                if self.poke_scale_mode:
                    if poke_linear:
                        if invert_poke:
                            # decay to zero over the first half, then ramp to -delta
                            delta_in = delta * (1 - float(n) / int(len/2)) if n < int(len/2) else delta * (float(n - int(len/2)) / int(math.ceil(float(len)/2)) - 1)
                        else:
                            delta_in = (1 - float(n) / (len - n_zero_frames)) * delta if n <= len - n_zero_frames else torch.zeros_like(delta)
                    else:
                        # bugfix: was `delta_scaling[n] * delta_in`, which raised
                        # UnboundLocalError on the first loop iteration
                        delta_in = delta_scaling[n] * delta
                else:
                    if poke_jump:
                        delta_in = delta if n < len - n_zero_frames else torch.zeros_like(delta)
                    else:
                        delta_in = delta if self.poke_every_t else (delta if n == 0 else torch.zeros_like(delta))
                sigma_n = self.fusion_block(delta_in, sigma_n)
                sigma_n.reverse()
                x = self.dec(sigma_n, [], del_shape=False)
                imgs.append(x)
                sigmas_hat_out.append(sigma_n)
            imgs = torch.stack(imgs, dim=1)
            sigmas_hat_out[-1].reverse()
        else:
            # --- single-frame reconstruction path ---
            sigmas = self.shape_enc(x)
            sigmas_out = sigmas
            sigmas_hat_out = None
            imgs = self.dec(sigmas, [], del_shape=False)
            # bugfix: reversing applies only to the list of skip features here;
            # in the sequence branch sigmas_out is a tensor and has no reverse()
            sigmas_out.reverse()
        return imgs, sigmas_out, sigmas_hat_out, []
class SingleScaleBaseline(nn.Module):
    """Ablation baseline: only the deepest (bottleneck) feature map is
    propagated through the ConvGRU — no multi-scale skip recurrence."""
    def __init__(self, spatial_size, config, n_no_motion=None):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        self.poke_scale_mode = config["poke_scale"] if "poke_scale" in config else False
        self.poke_every_t = config["poke_every_t"] if "poke_every_t" in config else True
        self.n_no_motion = n_no_motion
        if self.poke_scale_mode:
            assert self.n_no_motion is not None
        print("Initialize SingleScaleBaseline")
        # optional appearance branch for appearance/shape disentanglement
        if self.disentanglement:
            self.appearance_enc = Encoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, prepare_adain=True,
                                          resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        # n_skip_stages=0: no skip connections, bottleneck features only
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=0)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        # NOTE(review): the GRU is built from config["n_gru_layers"] while the state
        # stack below uses self.n_gru_layers = 3 — verify these agree in the configs
        self.n_gru_layers = 3
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=config["nf_deep"], kernel_sizes=3, n_layers=config["n_gru_layers"])
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths, n_skip_stages=0, disentanglement=self.disentanglement)
    def forward(self, app_img, shape_img, poke, len, poke_linear=False,delta_scaling = None, n_zero_frames=0, invert_poke=False,poke_jump=False):
        """Generate ``len`` frames (or reconstruct when ``len`` <= 0).

        Returns ``(imgs, sigmas_out, sigmas_hat_out, alpha)`` where ``alpha``
        is the appearance code (or None without disentanglement).
        """
        if self.disentanglement:
            alpha, *_ = self.appearance_enc(app_img)
        else:
            alpha = None
        # sigma = self.shape_enc(shape_img)
        if self.zeroflow_baseline:
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke, app_img], dim=1)
        x = shape_img
        if len > 0:
            # --- sequence generation path ---
            imgs = []
            sigmas_hat_out = []
            sigmas_out = []
            # infer dynamics input
            delta = self.dynamics_enc(poke)[0]
            # initialize every GRU layer's state with the bottleneck encoding
            sigma_n = self.shape_enc(x)[0]
            sigma_n = torch.stack([sigma_n] * self.n_gru_layers)
            for n in range(len):
                # delta scaling: constant, first-step-only, linear decay, or custom schedule
                delta_in = delta if self.poke_every_t else (delta if n == 0 else torch.zeros_like(delta))
                if self.poke_scale_mode:
                    if poke_linear:
                        if invert_poke:
                            delta_in = delta * (1 - float(n) / int(len / 2)) if n < int(len / 2) else delta * (float(n - int(len / 2)) / int(math.ceil(float(len) / 2)) - 1)
                        else:
                            delta_in = (1 - float(n) / (len - n_zero_frames)) * delta if n <= len - n_zero_frames else torch.zeros_like(delta)
                    else:
                        delta_in = delta_scaling[n] * delta_in
                # apply fusion block
                sigma_n = self.fusion_block(delta_in, sigma_n)
                # decode the top GRU layer's state
                sigma_n1 = sigma_n[-1]
                x = self.dec([sigma_n1], alpha, del_shape=False)
                imgs.append(x)
                sigmas_hat_out.append(sigma_n1)
            #sigmas_hat_out = torch.stack(sigmas_hat_out)
            imgs = torch.stack(imgs, dim=1)
        else:
            # --- single-frame reconstruction path ---
            sigmas = self.shape_enc(x)
            sigmas_out = sigmas[-1]
            sigmas_hat_out = None
            imgs = self.dec(sigmas, alpha, del_shape=False)
        return imgs, sigmas_out, sigmas_hat_out, alpha
class ResidualSequenceBaseline(nn.Module):
    """Ablation baseline: every frame is re-encoded and the GRU predicts a
    residual update to the bottleneck encoding at each step."""
    def __init__(self,spatial_size,config):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        #self.multiscale_fusion_block = config["multiscale_dynamics"]
        # optional appearance branch for appearance/shape disentanglement
        if self.disentanglement:
            self.appearance_enc = Encoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, prepare_adain=True,
                                          resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        # n_skip_stages=0: bottleneck features only, no skip connections
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=0)
        self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
        self.n_gru_layers = 3
        self.fusion_block = ConvGRU(input_size=config["nf_deep"], hidden_sizes=config["nf_deep"],kernel_sizes=3, n_layers=3)
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"], in_channels=self.shape_enc.depths, n_skip_stages=0, disentanglement=self.disentanglement)
    def forward(self,app_img,shape_img,poke,len):
        """Generate ``len`` frames (or reconstruct when ``len`` <= 0).

        Returns ``(imgs, sigmas_out, sigmas_hat_out, alpha)``; note that
        ``sigmas_out`` stays an empty list in the sequence branch.
        """
        if self.disentanglement:
            alpha, *_ = self.appearance_enc(app_img)
        else:
            alpha = None
        #sigma = self.shape_enc(shape_img)
        if self.zeroflow_baseline:
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke,app_img],dim=1)
        x = shape_img
        if len>0:
            # --- sequence generation path ---
            imgs = []
            sigmas_hat_out = []
            sigmas_out = []
            # infer dynamics input; the GRU state is initialized with the poke code
            delta = self.dynamics_enc(poke)[0]
            delta = torch.stack([delta]*self.n_gru_layers)
            for n in range(len):
                # re-encode the previously generated frame at every step
                sigma_n = self.shape_enc(x)[0]
                # apply fusion block (input: encoding, state: evolving delta)
                delta = self.fusion_block(sigma_n,delta)
                # residual connection: add the predicted update to the encoding
                sigma_n1 = sigma_n + delta[-1]
                x = self.dec([sigma_n1],alpha)
                imgs.append(x)
                sigmas_hat_out.append(sigma_n1)
            sigmas_hat_out = torch.stack(sigmas_hat_out,)
            imgs = torch.stack(imgs,dim=1)
        else:
            # --- single-frame reconstruction path ---
            sigmas = self.shape_enc(x)
            sigmas_out = sigmas[-1]
            sigmas_hat_out = None
            imgs = self.dec(sigmas,alpha,del_shape=False)
        return imgs, sigmas_out, sigmas_hat_out, alpha
class DynamicSkipModel(nn.Module):
    """Single-step model: encodes source image and poke, optionally fuses the
    dynamics into the shape encoding, and decodes exactly one output image."""
    def __init__(self, spatial_size,config):
        super().__init__()
        # number of downsampling layers; always such that spatial bottleneck size is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.disentanglement = config["disentanglement"] if "disentanglement" in config else False
        self.cat_poke_img = config["poke_and_img"]
        self.zeroflow_baseline = config["zeroflow_baseline"]
        # if True, the poke is fused with the shape encoding at every scale
        self.multiscale_fusion_block = config["multiscale_dynamics"]
        # optional appearance branch for appearance/shape disentanglement
        if self.disentanglement:
            self.appearance_enc = Encoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages, prepare_adain=True,
                                          resnet_down=config["resnet_down"] if "resnet_down" in config else False)
        n_skip_stages = min(config["n_skip_stages"],self.n_stages) if "n_skip_stages" in config else self.n_stages
        self.shape_enc = SkipConnectionEncoder(nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages,n_skip_stages=n_skip_stages)
        if config["multiscale_dynamics"]:
            self.dynamics_enc = SkipConnectionEncoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages, n_skip_stages=n_skip_stages)
            self.fusion_block = FusionBlockMultiscale(nf_in=config["nf_deep"],nfs=self.shape_enc.depths,n_blocks=config["n_blocks"])
        else:
            self.dynamics_enc = Encoder(nf_in=5 if self.cat_poke_img else 2, nf_max=config["nf_deep"], n_stages=self.n_stages)
            self.fusion_block = LearnedFusionBlock(nf=config["nf_deep"], n_blocks=config["n_blocks"])
        self.dec = SkipConnectionDecoder(nf_in=config["nf_deep"],in_channels=self.shape_enc.depths, n_skip_stages=n_skip_stages,disentanglement=self.disentanglement)
    def forward(self,app_img,shape_img,poke, apply_dynamics = False):
        """Return ``(img, sigma_out, alpha)`` for one step.

        When ``apply_dynamics`` is False the poke is encoded but not fused, so
        the model acts as a plain auto-encoder of ``shape_img``.
        """
        if self.disentanglement:
            alpha, *_ = self.appearance_enc(app_img)
        else:
            alpha = None
        sigma = self.shape_enc(shape_img)
        if self.zeroflow_baseline:
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke,app_img],dim=1)
        # multiscale fusion consumes all scales; otherwise only the bottleneck code
        delta = self.dynamics_enc(poke) if self.multiscale_fusion_block else self.dynamics_enc(poke)[0]
        if apply_dynamics:
            # single-scale: sigma.pop() removes the bottleneck, which is replaced
            # by the fused sigma_hat when handed to the decoder below
            sigma_hat = self.fusion_block(sigma if self.multiscale_fusion_block else sigma.pop(), delta)
            if self.multiscale_fusion_block:
                sigma_in_dec = sigma_hat
            else:
                sigma_in_dec = sigma + [sigma_hat]
            sigma_out = sigma_hat
        else:
            sigma_out = sigma if self.multiscale_fusion_block else sigma[-1]
        # sigma_in_dec is only bound when apply_dynamics is True; the conditional
        # expression below never evaluates it otherwise
        img = self.dec(sigma_in_dec if apply_dynamics else sigma,alpha,del_shape=False)
        return img, sigma_out, alpha
class DisentangledModelWithoutDynamics(nn.Module):
    """Appearance/shape disentanglement model without a dynamics branch:
    one encoder per factor and a decoder that recombines them (via AdaIN
    conditioning or plain concatenation)."""

    def __init__(self, spatial_size, config):
        super().__init__()
        # downsample until the spatial bottleneck is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.adain = config["adain"]
        self.latent_fusion = config.get("latent_fusion", None)
        nf_deep = config["nf_deep"]
        self.appearance_enc = Encoder(
            nf_in=3, nf_max=nf_deep, n_stages=self.n_stages,
            prepare_adain=self.adain,
            resnet_down=config.get("resnet_down", False),
        )
        self.shape_enc = Encoder(
            nf_in=3, nf_max=nf_deep, n_stages=self.n_stages,
            variational=config["ib_shape"],
        )
        if self.adain:
            self.dec = AdaINDecoderDisentangled(
                nf_in=nf_deep, n_stages=self.n_stages,
                latent_fusion=self.latent_fusion,
                nf_in_bn=self.appearance_enc.nf_in_bn,
            )
        else:
            self.dec = DecoderEntangled(nf_in=2 * nf_deep, n_stages=self.n_stages)

    def forward(self, app_img, shape_img):
        """Return (img, alpha, sigma, shape_mean, shape_logstd)."""
        # appearance: global vector plus spatial feature map
        alpha, alpha_spatial, *_ = self.appearance_enc(app_img)
        # shape: (sampled) code with its variational parameters
        sigma, shape_mean, shape_logstd = self.shape_enc(shape_img)
        img = self.dec(alpha, sigma, alpha_spatial)
        return img, alpha, sigma, shape_mean, shape_logstd
class BasicDisentangledModel(nn.Module):
    """Disentangled appearance/shape model with a poke-conditioned dynamics
    branch whose code is fused into the shape representation."""

    def __init__(self, spatial_size, config):
        super().__init__()
        # downsample until the spatial bottleneck is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.zero_flow_baseline = config["zero_flow_baseline"]
        self.adain = config["adain"]
        self.cat_poke_img = config["poke_and_img"]
        self.latent_fusion = config.get("latent_fusion", None)
        nf_deep = config["nf_deep"]
        self.appearance_enc = Encoder(
            nf_in=3, nf_max=nf_deep, n_stages=self.n_stages,
            prepare_adain=self.adain,
            resnet_down=config.get("resnet_down", False),
        )
        self.shape_enc = Encoder(
            nf_in=3, nf_max=nf_deep, n_stages=self.n_stages,
            variational=config["ib_shape"],
        )
        # poke has 2 channels; +3 when the image is concatenated to it
        poke_channels = 5 if self.cat_poke_img else 2
        self.dynamics_enc = Encoder(nf_in=poke_channels, nf_max=nf_deep, n_stages=self.n_stages)
        self.fusion_block = LearnedFusionBlock(nf=nf_deep, n_blocks=config["n_blocks"])
        if self.adain:
            self.dec = AdaINDecoderDisentangled(
                nf_in=nf_deep, n_stages=self.n_stages,
                latent_fusion=self.latent_fusion,
                nf_in_bn=self.appearance_enc.nf_in_bn,
            )
        else:
            self.dec = DecoderEntangled(nf_in=2 * nf_deep, n_stages=self.n_stages)

    def forward(self, app_img, shape_img, poke, apply_dynamics=False):
        """Return (img, sigma, sigma_hat, alpha, delta, shape_mean, shape_logstd)."""
        alpha, alpha_spatial, *_ = self.appearance_enc(app_img)
        sigma, shape_mean, shape_logstd = self.shape_enc(shape_img)
        if self.zero_flow_baseline:
            poke = torch.zeros_like(poke)
        if self.cat_poke_img:
            poke = torch.cat([poke, app_img], dim=1)
        delta, *_ = self.dynamics_enc(poke)
        if self.zero_flow_baseline:
            delta = torch.zeros_like(delta)
        # fusion is always computed so sigma_hat is available for the losses
        sigma_hat = self.fusion_block(sigma, delta)
        shape_code = sigma_hat if apply_dynamics else sigma
        img = self.dec(alpha, shape_code, alpha_spatial)
        return img, sigma, sigma_hat, alpha, delta, shape_mean, shape_logstd
class LearnedFusionBlock(nn.Module):
    """Fuses a shape code and a dynamics code by channel concatenation
    followed by a stack of residual blocks."""

    def __init__(self, nf, n_blocks):
        super().__init__()
        assert n_blocks >= 1
        # first block maps the concatenated 2*nf channels back to nf
        layers = [ResBlock(2 * nf, nf)]
        layers.extend(ResBlock(nf, nf) for _ in range(n_blocks - 1))
        self.model = nn.Sequential(*layers)

    def forward(self, sigma, delta):
        fused = torch.cat((sigma, delta), dim=1)
        return self.model(fused)
class BasicModel(nn.Module):
    """Entangled baseline: an object encoder (optionally variational), a flow
    encoder, and a decoder that conditions on the flow code (AdaIN or
    concatenation)."""

    def __init__(self, spatial_size, config):
        super().__init__()
        # downsample until the spatial bottleneck is 16x16
        self.n_stages = int(np.log2(spatial_size[0] // 16))
        self.zero_flow_baseline = config["zero_flow_baseline"]
        self.adain = config["adain"]
        self.obj_enc = Encoder(
            nf_in=3, nf_max=config["nf_deep"], n_stages=self.n_stages,
            variational=config["variational"],
        )
        self.flow_enc = Encoder(
            nf_in=2, nf_max=config["nf_deep"], n_stages=self.n_stages,
            prepare_adain=self.adain,
        )
        if self.adain:
            self.dec = AdaINDecoderEntangled(
                nf_in=config["nf_deep"], n_stages=self.n_stages,
                latent_fusion=config["latent_fusion"],
            )
        else:
            self.dec = DecoderEntangled(nf_in=2 * config["nf_deep"], n_stages=self.n_stages)

    def forward(self, image, flow, sample_prior=False):
        """Return (img, object_code, dynamics_code, mean, logstd)."""
        # object code plus variational parameters when the model is variational
        object_code, mean, logstd = self.obj_enc(image, sample_prior)
        # flow encoder yields a flat vector and a spatial feature map
        dyn_vec, dyn_spatial, _ = self.flow_enc(flow)
        if self.zero_flow_baseline:
            # ablation: drop the flow conditioning entirely
            dyn_vec = torch.zeros_like(dyn_vec)
            dyn_spatial = torch.zeros_like(dyn_spatial)
        if self.adain:
            img = self.dec(object_code, dyn_vec, dyn_spatial)
        else:
            img = self.dec(object_code, dyn_vec)
        return img, object_code, dyn_vec, mean, logstd
class VariationalSkipConnectionEncoderFGBG(nn.Module):
    """Skip-connection encoder that splits every skip feature map 2:1 into a
    variational foreground part (reparameterized) and a deterministic
    background part.

    Channel layout: each conv emits 1.5x the nominal width; the first two
    thirds hold the concatenated (mu, logstd) of the foreground, the last
    third is the background. ``forward`` returns four lists (fg samples, bg
    features, mus, logstds) with the bottleneck output appended last.
    """

    def __init__(self, nf_in, nf_max, n_stages, n_skip_stages, act="relu", nf_first=None):
        super().__init__()
        self.blocks = nn.ModuleList()
        self.n_stages = n_stages
        self.depths = []
        self.downs = []
        nf = 64 if nf_first is None else nf_first
        # 1.5x channels: 2/3 variational fg (mu + logstd), 1/3 bg
        self.blocks.append(
            NormConv2d(
                nf_in, int(1.5 * nf), 3, 2, padding=1
            )
        )
        self.n_skip_stages = n_skip_stages
        self.depths.append(nf)
        for n in range(self.n_stages - 1):
            self.blocks.append(
                NormConv2d(
                    nf,
                    min(nf * 3, int(1.5 * nf_max)),
                    3,
                    2,
                    padding=1,
                )
            )
            nf = min(nf * 2, nf_max)
            # depths are kept deepest-first (insert at the front)
            self.depths.insert(0, nf)
            self.downs.insert(0, True)
        self.bottleneck = ResBlock(nf, int(1.5 * nf_max), activation=act, stride=1)
        self.downs.insert(0, False)
        self.squash = nn.Sigmoid()

    def _reparameterize(self, codes):
        """Split ``codes`` channel-wise into (mu, logstd), squash logstd with a
        sigmoid and draw a sample via the reparameterization trick.

        Returns (sample, mu, logstd)."""
        half = int(codes.shape[1] / 2)
        mu = codes[:, :half]
        logstd = self.squash(codes[:, half:])
        std = torch.exp(logstd)
        eps = torch.randn_like(std)
        return eps.mul(std) + mu, mu, logstd

    def _split_and_sample(self, x):
        """Deduplicated per-stage/bottleneck logic: split channels 2:1 into a
        variational fg part (sampled) and a deterministic bg part."""
        act_div = int(x.shape[1] * 2. / 3.)
        sample, mu, logstd = self._reparameterize(x[:, :act_div])
        bg = x[:, act_div:]
        return sample, bg, mu, logstd

    def forward(self, x):
        out_fg, out_bg, out_mu, out_logstd = [], [], [], []
        for i in range(self.n_stages):
            x = self.blocks[i](x)
            if i >= self.n_stages - self.n_skip_stages:
                sample, bg, mu, logstd = self._split_and_sample(x)
                out_fg.append(sample)
                out_bg.append(bg)
                out_mu.append(mu)
                out_logstd.append(logstd)
                # propagate the mean (not the sample) together with the bg part
                x = torch.cat([mu, bg], dim=1)
        x = self.bottleneck(x)
        sample, bg, mu, logstd = self._split_and_sample(x)
        out_fg.append(sample)
        out_bg.append(bg)
        out_mu.append(mu)
        out_logstd.append(logstd)
        return out_fg, out_bg, out_mu, out_logstd
class SkipConnectionEncoder(nn.Module):
    """Strided-conv encoder that exposes intermediate feature maps as skip
    connections for a matching SkipConnectionDecoder.

    With ``fg_bg`` every emitted feature map is split channel-wise by ``div``
    into a foreground and a background part. When ``layers`` is given it
    overrides both the per-stage channel widths and the stage/skip counts.
    """
    def __init__(self,nf_in,nf_max, n_stages, n_skip_stages, act = "elu", nf_first=None, fg_bg = False, div= None, norm_layer="in", layers=None):
        super().__init__()
        self.blocks = nn.ModuleList()
        # `layers` overrides the stage count with its own length
        self.n_stages = n_stages if layers is None else len(layers)
        self.depths = []
        self.downs = []
        if nf_first is None:
            nf = 32
        else:
            nf = nf_first
        if layers is not None:
            # explicit per-stage widths take precedence over nf_first
            nf = layers[0]
        self.fg_bg = fg_bg
        if self.fg_bg:
            # div defines the fg/bg channel split ratio in forward()
            assert div is not None
            self.div = div
        self.blocks.append(
            Conv2dBlock(
                nf_in, nf, 3, 2, norm=norm_layer, activation=act, padding=1
            )
        )
        # NOTE(review): with `layers` set, n_skip_stages is overridden to use
        # every stage as a skip connection — confirm this is intended
        self.n_skip_stages = n_skip_stages if layers is None else len(layers)
        self.depths.append(nf)
        for n in range(self.n_stages - 1):
            self.blocks.append(
                Conv2dBlock(
                    nf,
                    min(nf * 2, nf_max) if layers is None else layers[n+1],
                    3,
                    2,
                    norm=norm_layer,
                    activation=act,
                    padding=1,
                )
            )
            nf = min(nf * 2, nf_max) if layers is None else layers[n+1]
            # depths/downs are kept deepest-first (insert at the front)
            self.depths.insert(0,nf)
            self.downs.insert(0,True)
        self.bottleneck = ResBlock(nf, nf_max, activation=act, stride=1,norm=norm_layer)
        self.downs.insert(0,False)
    def forward(self,x):
        """Return the list of skip feature maps (shallow -> deep, bottleneck
        last); with fg_bg two lists (foreground, background) instead."""
        if self.fg_bg:
            out_fg = []
            out_bg = []
        else:
            out = []
        for i in range(self.n_stages):
            x = self.blocks[i](x)
            # only the deepest n_skip_stages maps are exported as skips
            if i >= self.n_stages - self.n_skip_stages:
                if self.fg_bg:
                    act_div = int(x.shape[1] / self.div)
                    out_fg.append(x[:,:act_div])
                    out_bg.append(x[:,act_div:])
                else:
                    out.append(x)
        x = self.bottleneck(x)
        if self.fg_bg:
            act_div = int(x.shape[1] / self.div)
            out_fg.append(x[:,:act_div])
            out_bg.append(x[:,act_div:])
            return out_fg, out_bg
        else:
            out.append(x)
            return out
class Encoder(nn.Module):
    """Convolutional encoder with three mutually exclusive output heads:

    - ``prepare_adain``: the bottleneck features are reduced to a flat vector
      (strided ResBlocks + 4x4 conv when ``resnet_down``, otherwise global
      average pooling) and passed through a final linear layer.
    - ``variational``: NormConv2d heads predict (mu, logstd); the returned
      code is a reparameterized sample (or a prior sample on request).
    - neither: plain deterministic feed-forward features.
    """
    def __init__(self, nf_in, nf_max, n_stages, prepare_adain=False, variational=False, resnet_down=False, norm_layer = "in", layers=None):
        super().__init__()
        self.prepare_adain = prepare_adain
        self.variational = variational
        if self.prepare_adain:
            assert not self.variational, "Encoder should not be variational if adain is prepared"
        if self.prepare_adain:
            self.final_linear = nn.Linear(nf_max, nf_max)
        act = "elu" #if self.variational else "relu"
        blocks = []
        bottleneck = []
        # channel widths double per stage up to nf_max, unless `layers`
        # explicitly lists them (which then also fixes the stage count)
        nf = 32 if layers is None else layers[0]
        blocks.append(
            Conv2dBlock(
                nf_in, nf, 3, 2, norm=norm_layer, activation=act, padding=1
            )
        )
        n_stages = n_stages if layers is None else len(layers)
        for n in range(n_stages - 1):
            blocks.append(
                Conv2dBlock(
                    nf,
                    min(nf * 2, nf_max) if layers is None else layers[n+1],
                    3,
                    2,
                    norm=norm_layer,
                    activation=act,
                    padding=1,
                )
            )
            nf = min(nf * 2, nf_max) if layers is None else layers[n+1]
        # a strided bottleneck is only used when producing a flat adain vector
        self.resnet_down = resnet_down and self.prepare_adain
        # depth of the pre-bottleneck feature map (consumed by the decoders)
        self.nf_in_bn = nf
        bottleneck.append(ResBlock(nf, nf_max,activation=act, stride=2 if self.resnet_down else 1, norm=norm_layer))
        if layers is None:
            # second bottleneck block only in the default configuration
            bottleneck.append(ResBlock(nf_max, nf_max,activation=act, stride=2 if self.resnet_down else 1, norm=norm_layer))
        if self.resnet_down:
            # collapses the remaining (presumably 4x4 — see forward) map to 1x1
            self.make_vector = Conv2dBlock(nf_max,nf_max,4,1,0)
        if self.variational:
            self.make_mu = NormConv2d(nf_max,nf_max,3, padding=1)
            self.make_sigma = NormConv2d(nf_max,nf_max,3, padding=1)
            # sigmoid keeps the predicted logstd in (0, 1)
            self.squash = nn.Sigmoid()
        self.model = nn.Sequential(*blocks)
        self.bottleneck = nn.Sequential(*bottleneck)
    def forward(self, input, sample_prior=False):
        """Encode `input`; returns (code, mean, logstd).

        `mean`/`logstd` are only true variational parameters in variational
        mode — otherwise `mean` holds the pre-bottleneck feature map and
        `logstd` is None. `sample_prior` draws the code from N(0, I) instead
        of the posterior (variational mode only).
        """
        out = self.model(input)
        # NOTE(review): despite the name, this is the pre-bottleneck spatial
        # feature map; it is only replaced by the true mean in the
        # variational branch below
        mean = out
        out = self.bottleneck(out)
        logstd = None
        if self.prepare_adain:
            # mean is a false name here, this is the raw channels of the conv model
            # mean = out
            if self.resnet_down:
                # in this case, mean has spatial_size 4x4
                out = self.make_vector(out).squeeze(-1).squeeze(-1)
            else:
                out = F.avg_pool2d(out, out.size(2), padding=0)
                out = out.squeeze(-1).squeeze(-1)
            # no activation for the first trial, as relu would not allow for values < 0
            out = self.final_linear(out)
        elif self.variational:
            mean = self.make_mu(out)
            # normalize sigma in between
            logstd = self.squash(self.make_sigma(out))
            if sample_prior:
                out = torch.randn_like(mean)
            else:
                out = self.reparametrize(mean,logstd)
        return out, mean, logstd
    def reparametrize(self,mean,logstd):
        """Draw mean + eps * exp(logstd) with eps ~ N(0, I)."""
        std = torch.exp(logstd)
        eps = torch.randn_like(std)
        return eps.mul(std) + mean
class AdaINDecoderEntangled(nn.Module):
    """
    Upsampling decoder starting at a 16x16 bottleneck (quadratic images):
    each stage is an AdaIN residual block whose affine parameters are derived
    from the flat dynamics code. With ``latent_fusion`` the spatial dynamics
    features are concatenated to the object code first.
    """

    def __init__(self, nf_in, n_stages, latent_fusion):
        super().__init__()
        self.blocks = nn.ModuleList()
        self.affines = nn.ModuleList()
        self.n_stages = n_stages
        self.latent_fusion = latent_fusion
        # latent fusion doubles the input depth, yielding a deeper model
        nf = nf_in * 2 if self.latent_fusion else nf_in
        self.in_block = ResBlock(nf, nf)
        for _ in range(self.n_stages):
            half = int(nf // 2)
            self.affines.append(AdaINLinear(nf_in, half))
            # upsampling AdaIN residual stage
            self.blocks.append(ResBlock(nf, half, norm="adain", upsampling=True))
            nf = half
        self.out_conv = Conv2dBlock(
            nf, 3, 3, 1, padding=1, norm="none", activation="tanh"
        )

    def forward(self, object_code, dynamics_linear, dynamics_spatial):
        """Decode the object code into an image, modulated by the dynamics."""
        if self.latent_fusion:
            x = torch.cat([object_code, dynamics_spatial], dim=1)
        else:
            x = object_code
        x = self.in_block(x)
        for affine, block in zip(self.affines, self.blocks):
            x = block(x, affine(dynamics_linear))
        return self.out_conv(x)
class AdaINDecoderDisentangled(nn.Module):
    """AdaIN decoder for the disentangled models: the shape code ``sigma`` is
    decoded spatially while the appearance vector ``alpha`` supplies the
    AdaIN affine parameters of every upsampling stage. With ``latent_fusion``
    the spatial appearance features are concatenated to the shape code."""

    def __init__(self, nf_in, n_stages, latent_fusion=None, nf_in_bn=0):
        super().__init__()
        self.blocks = nn.ModuleList()
        self.affines = nn.ModuleList()
        self.n_stages = n_stages
        self.latent_fusion = latent_fusion if latent_fusion is not None else False
        if self.latent_fusion:
            # the spatial appearance depth must be known for the concat
            assert nf_in_bn > 0
        nf = nf_in + nf_in_bn if self.latent_fusion else nf_in
        self.in_block = ResBlock(nf, nf)
        for _ in range(self.n_stages):
            half = int(nf // 2)
            self.affines.append(AdaINLinear(nf_in, half))
            # upsampling AdaIN residual stage
            self.blocks.append(ResBlock(nf, half, norm="adain", upsampling=True))
            nf = half
        self.out_conv = Conv2dBlock(
            nf, 3, 3, 1, padding=1, norm="none", activation="tanh"
        )

    def forward(self, alpha, sigma, alpha_spatial=None):
        """Decode ``sigma`` into an image; ``alpha`` drives the AdaIN params."""
        if self.latent_fusion:
            assert alpha_spatial is not None
            x = torch.cat([sigma, alpha_spatial], dim=1)
        else:
            x = sigma
        x = self.in_block(x)
        for affine, block in zip(self.affines, self.blocks):
            x = block(x, affine(alpha))
        return self.out_conv(x)
class SkipConnectionDecoder(nn.Module):
    """Decoder consuming the feature pyramid of a SkipConnectionEncoder.

    ``in_channels`` lists the skip depths deepest-first. With
    ``disentanglement`` every stage becomes an AdaIN block modulated by the
    appearance code. ``del_shape`` makes forward() consume the skip list
    destructively via pop().
    """
    def __init__(self,nf_in, in_channels, n_skip_stages, disentanglement=False, spectral_norm=False, norm_layer="in",layers=None):
        super().__init__()
        self.n_stages = len(in_channels)
        self.disentanglement = disentanglement
        self.n_skip_stages = n_skip_stages
        self.blocks = nn.ModuleList()
        if self.disentanglement:
            self.affines = nn.ModuleList()
        nf = nf_in
        self.in_block = ResBlock(nf,in_channels[0], snorm=spectral_norm, norm=norm_layer)
        for i,nf in enumerate(in_channels):
            if layers is None:
                # halve the depth per stage, except for the last (shallowest)
                n_out = int(nf // 2) if i < len(in_channels) - 1 else nf
                if self.disentanglement:
                    self.affines.append(AdaINLinear(nf_in,n_out))
            # stages receiving a skip connection see doubled input channels
            nf_in_dec = 2 * nf if i < self.n_skip_stages else nf
            if layers is not None:
                # NOTE(review): with `layers` set, n_out is overridden and no
                # affines are built even when disentanglement is on — confirm
                # these options are never combined
                nf_in_dec = 2 * nf
                n_out = in_channels[i+1] if i < len(in_channels) -1 else nf
            self.blocks.append(ResBlock(nf_in_dec, n_out , norm="adain" if self.disentanglement else norm_layer, upsampling=True,snorm=spectral_norm))
        self.out_conv = Conv2dBlock(nf,3,3,1,1,norm="none",activation="tanh")
    def forward(self,shape, appearance = None, del_shape=True):
        """Decode the skip pyramid `shape` (bottleneck last) into an image.

        With del_shape=True the list is consumed via pop(); otherwise it is
        indexed non-destructively.
        """
        # start from the bottleneck feature (last element)
        x = self.in_block(shape.pop() if del_shape else shape[-1])
        for n in range(self.n_stages):
            if n < self.n_skip_stages:
                # concatenate the matching skip feature, deepest first
                x = torch.cat([x,shape.pop() if del_shape else shape[self.n_skip_stages-1-n]],1)
            if self.disentanglement:
                adain_params = self.affines[n](appearance)
                x = self.blocks[n](x,adain_params)
            else:
                x = self.blocks[n](x)
        if del_shape:
            # every skip feature must have been consumed
            assert not shape
        out = self.out_conv(x)
        return out
class DecoderEntangled(nn.Module):
    """
    Plain upsampling decoder starting at a 16x16 bottleneck (quadratic
    images): the object and dynamics codes are concatenated channel-wise and
    decoded by a stack of instance-normalized, upsampling residual blocks.
    """

    def __init__(self, nf_in, n_stages):
        super().__init__()
        self.n_stages = n_stages
        self.blocks = nn.ModuleList()
        nf = nf_in
        self.in_block = ResBlock(nf, nf)
        for _ in range(self.n_stages):
            half = int(nf // 2)
            self.blocks.append(ResBlock(nf, half, norm="in", upsampling=True))
            nf = half
        self.out_conv = Conv2dBlock(
            nf, 3, 3, 1, padding=1, norm="none", activation="tanh"
        )

    def forward(self, object_code, dynamics_code, *args):
        """Decode the concatenated codes; extra args are ignored (keeps the
        call signature interchangeable with the AdaIN decoder)."""
        x = self.in_block(torch.cat([object_code, dynamics_code], dim=1))
        for block in self.blocks:
            x = block(x)
        return self.out_conv(x)
class FusionBlockMultiscale(nn.Module):
    """One LearnedFusionBlock per resolution level for fusing the multi-scale
    shape and dynamics pyramids.

    Blocks are created deepest-first (widths from ``nfs``), while the input
    pyramids are ordered shallowest-first, hence the reversed indexing in
    forward().
    """

    def __init__(self, nf_in, nfs, n_blocks):
        super().__init__()
        self.blocks = nn.ModuleList()
        self.n_stages = len(nfs) + 1
        nf = nf_in
        for stage in range(self.n_stages):
            self.blocks.append(LearnedFusionBlock(nf, n_blocks))
            if stage < len(nfs):
                nf = nfs[stage]

    def forward(self, sigmas, deltas):
        """Fuse each (sigma, delta) level; returns a list ordered like the
        inputs (shallow -> deep)."""
        n_levels = len(sigmas)
        # block 0 handles the deepest level, so pair block (n_levels-1-lvl)
        # with pyramid level lvl
        return [
            self.blocks[n_levels - 1 - lvl](sigmas[lvl], deltas[lvl])
            for lvl in range(n_levels)
        ]
| 45,992 | 39.274081 | 181 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/models/blocks.py | import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import weight_norm, spectral_norm
from torch.nn import init
class ResBlock(nn.Module):
    """Two-convolution residual block, optionally upsampling (transposed
    conv) or strided.

    The residual path is convolved whenever the input/output depths differ,
    the block upsamples, or stride != 1, so shapes match for the addition.
    With ``norm="adain"`` the AdaIN parameters passed to forward() are
    forwarded to both main-path convs. The second conv has no activation and
    none is applied after the addition.

    NOTE(review): the residual-path conv hardcodes norm="in" instead of using
    ``self.norm`` — preserved as-is because trained checkpoints depend on it;
    confirm before unifying.
    """

    def __init__(
        self,
        dim_in,
        dim_out,
        norm="in",
        activation="elu",
        pad_type="zero",
        upsampling=False,
        stride=1,
        snorm=False
    ):
        super(ResBlock, self).__init__()
        self.norm = norm
        # unused; kept only for attribute compatibility with existing code
        self.model = nn.ModuleList()
        # main path: conv1 changes resolution/depth...
        if upsampling:
            self.conv1 = Conv2dTransposeBlock(
                dim_in,
                dim_out,
                3,
                2,
                1,
                norm=self.norm,
                activation=activation,
                snorm=snorm
            )
        else:
            self.conv1 = Conv2dBlock(
                dim_in,
                dim_out,
                3,
                stride,
                1,
                norm=self.norm,
                activation=activation,
                pad_type=pad_type,
                snorm=snorm
            )
        # ...conv2 keeps them; identical for both modes (deduplicated from the
        # original per-branch copies)
        self.conv2 = Conv2dBlock(
            dim_out,
            dim_out,
            3,
            1,
            1,
            norm=self.norm,
            activation="none",
            pad_type=pad_type,
            snorm=snorm
        )
        self.convolve_res = dim_in != dim_out or upsampling or stride != 1
        if self.convolve_res:
            if not upsampling:
                self.res_conv = Conv2dBlock(dim_in, dim_out, 3, stride, 1,
                                            norm="in",  # hardcoded, see class note
                                            activation=activation,
                                            pad_type=pad_type,
                                            snorm=snorm)
            else:
                self.res_conv = Conv2dTransposeBlock(dim_in, dim_out, 3, 2, 1,
                                                     norm="in",  # hardcoded, see class note
                                                     activation=activation,
                                                     snorm=snorm)

    def forward(self, x, adain_params=None):
        """Return conv2(conv1(x)) + (possibly convolved) residual."""
        residual = x
        if self.convolve_res:
            residual = self.res_conv(residual)
        out = self.conv1(x, adain_params)
        out = self.conv2(out, adain_params)
        out += residual
        return out
class Conv2dBlock(nn.Module):
    """Conv2d preceded by an explicit padding layer and optionally followed
    by normalization and a nonlinearity.

    Layout per call: pad -> conv -> norm -> activation; the activation moves
    to the front when ``activation_first`` is set. With ``norm="adain"`` the
    affine parameters must be supplied to forward() via ``adain_params``.

    Args:
        in_dim, out_dim: channel counts.
        ks, st: kernel size and stride.
        padding: symmetric padding applied by the dedicated pad layer
            (the conv itself uses padding=0).
        norm: "bn" | "in" | "group" | "adain" | "none".
        activation: "relu" | "lrelu" | "tanh" | "elu" | "none".
        pad_type: "reflect" | "replicate" | "zero".
        use_bias: whether the conv has a bias term.
        activation_first: apply the nonlinearity before the conv.
        snorm: wrap the conv in spectral normalization.
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        ks,
        st,
        padding=0,
        norm="none",
        activation="elu",
        pad_type="zero",
        use_bias=True,
        activation_first=False,
        snorm=False
    ):
        super().__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # padding layer (the conv below is created without padding)
        if pad_type == "reflect":
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == "replicate":
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == "zero":
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # normalization layer
        norm_dim = out_dim
        if norm == "bn":
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == "in":
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == "group":
            self.norm = nn.GroupNorm(num_channels=norm_dim, num_groups=16)
        elif norm == "adain":
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == "none":
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation
        if activation == "relu":
            self.activation = nn.ReLU(inplace=True)
        elif activation == "lrelu":
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == "tanh":
            self.activation = nn.Tanh()
        elif activation == "elu":
            self.activation = nn.ELU()
        elif activation == "none":
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        # convolution, optionally spectrally normalized
        if snorm:
            self.conv = spectral_norm(nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)

    def forward(self, x, adain_params=None):
        """Apply the block; ``adain_params`` is used only with norm="adain"."""
        # single code path replacing the two duplicated branches of the
        # original: only the activation's position depends on activation_first
        if self.activation_first and self.activation:
            x = self.activation(x)
        x = self.conv(self.pad(x))
        if isinstance(self.norm, AdaptiveInstanceNorm2d):
            x = self.norm(x, adain_params)
        elif self.norm:
            x = self.norm(x)
        if not self.activation_first and self.activation:
            x = self.activation(x)
        return x
class NormConv2d(nn.Module):
    """
    2d convolution with l2 weight normalization plus a learned per-channel
    affine rescaling (gamma * conv(x) + beta).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        # learned per-output-channel shift and scale
        self.beta = nn.Parameter(
            torch.zeros([1, out_channels, 1, 1], dtype=torch.float32)
        )
        self.gamma = nn.Parameter(
            torch.ones([1, out_channels, 1, 1], dtype=torch.float32)
        )
        self.conv = weight_norm(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            name="weight",
        )

    def forward(self, x):
        # weight normalization is handled by the weight_norm hook on the conv
        return self.gamma * self.conv(x) + self.beta
class Conv2dTransposeBlock(nn.Module):
    """Transposed-conv counterpart of :class:`Conv2dBlock` (used for 2x
    upsampling stages).

    Layout per call: conv_transpose -> norm -> activation; the activation
    moves to the front when ``activation_first`` is set. With
    ``norm="adain"`` the affine parameters are supplied via ``adain_params``.
    """

    def __init__(
        self,
        in_dim,
        out_dim,
        ks,
        st,
        padding=0,
        norm="none",
        activation="elu",
        use_bias=True,
        activation_first=False,
        snorm=False
    ):
        super().__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # normalization layer
        norm_dim = out_dim
        if norm == "bn":
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == "in":
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == "group":
            self.norm = nn.GroupNorm(num_channels=norm_dim, num_groups=16)
        elif norm == "adain":
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == "none":
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation
        if activation == "relu":
            # fix: "relu" was previously rejected by the final assert even
            # though Conv2dBlock accepts it
            self.activation = nn.ReLU(inplace=True)
        elif activation == "elu":
            # NOTE(review): historical quirk — "elu" instantiates ReLU here
            # (unlike Conv2dBlock, where "elu" -> nn.ELU). Preserved unchanged
            # because existing configs/checkpoints rely on this behavior.
            self.activation = nn.ReLU(inplace=True)
        elif activation == "lrelu":
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == "tanh":
            self.activation = nn.Tanh()
        elif activation == "none":
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        # transposed convolution, optionally spectrally normalized
        if snorm:
            self.conv = spectral_norm(nn.ConvTranspose2d(in_dim, out_dim, ks, st, bias=self.use_bias, padding=padding, output_padding=padding))
        else:
            self.conv = nn.ConvTranspose2d(in_dim, out_dim, ks, st, bias=self.use_bias, padding=padding, output_padding=padding)

    def forward(self, x, adain_params=None):
        """Apply the block; ``adain_params`` is used only with norm="adain"."""
        # single code path replacing the two duplicated branches of the
        # original: only the activation's position depends on activation_first
        if self.activation_first and self.activation:
            x = self.activation(x)
        x = self.conv(x)
        if isinstance(self.norm, AdaptiveInstanceNorm2d):
            x = self.norm(x, adain_params)
        elif self.norm:
            x = self.norm(x)
        if not self.activation_first and self.activation:
            x = self.activation(x)
        return x
class AdaptiveInstanceNorm2d(nn.Module):
    """Instance normalization whose affine parameters are supplied per call
    (AdaIN). Implemented by folding the batch into the channel dimension and
    running batch_norm in training mode, so every (sample, channel) pair is
    normalized independently."""

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

    def forward(self, x, adain_params):
        b, c = x.shape[0], x.shape[1]
        # fold batch into channels: each of the b*c channels is normalized
        # separately, which is exactly instance norm
        folded = x.contiguous().view(1, b * c, *x.shape[2:])
        normalized = F.batch_norm(
            folded,
            self.running_mean.repeat(b),
            self.running_var.repeat(b),
            adain_params["weight"],
            adain_params["bias"],
            True,
            self.momentum,
            self.eps,
        )
        return normalized.view(b, c, *x.shape[2:])

    def __repr__(self):
        return f"{self.__class__.__name__}({self.num_features})"
class AdaINLinear(nn.Module):
    """Maps a latent vector to the AdaIN affine parameters: one linear layer
    producing 2 * target_units values, split into "weight" and "bias"."""

    def __init__(self, in_units, target_units, use_bias=True, actfn=nn.ReLU):
        super().__init__()
        self.linear = nn.Linear(in_units, 2 * target_units, bias=use_bias)
        self.act_fn = actfn()

    def forward(self, x):
        params = self.act_fn(self.linear(x))
        half = params.size(1) // 2
        # first half modulates scale, second half the shift
        return {"weight": params[:, :half], "bias": params[:, half:]}
class ConvGRUCell(nn.Module):
    """
    A single convolutional GRU cell; gate weights are orthogonally
    initialized and biases zeroed. With ``upsample`` the input is first
    upsampled 2x by a transposed conv.
    """

    def __init__(self, input_size, hidden_size, kernel_size, upsample=False):
        super().__init__()
        pad = kernel_size // 2
        self.input_size = input_size
        self.upsample = upsample
        self.hidden_size = hidden_size
        joint = input_size + hidden_size
        self.reset_gate = nn.Conv2d(joint, hidden_size, kernel_size, padding=pad)
        self.update_gate = nn.Conv2d(joint, hidden_size, kernel_size, padding=pad)
        self.out_gate = nn.Conv2d(joint, hidden_size, kernel_size, padding=pad)
        if self.upsample:
            self.up_gate = nn.ConvTranspose2d(input_size, input_size, kernel_size, 2,
                                              padding=pad, output_padding=pad)
        # orthogonal weights / zero biases for all gates
        for gate in (self.reset_gate, self.update_gate, self.out_gate):
            init.orthogonal_(gate.weight)
        for gate in (self.reset_gate, self.update_gate, self.out_gate):
            init.constant_(gate.bias, 0.)
        if self.upsample:
            init.orthogonal_(self.up_gate.weight)
            init.constant_(self.up_gate.bias, 0.)

    def forward(self, input_, prev_state):
        if self.upsample:
            input_ = self.up_gate(input_)
        # lazily create a zero state matching the (possibly upsampled) input
        if prev_state is None:
            state_size = [input_.data.size()[0], self.hidden_size] + list(input_.data.size()[2:])
            if torch.cuda.is_available():
                prev_state = torch.zeros(state_size).cuda()
            else:
                prev_state = torch.zeros(state_size)
        # data layout is [batch, channel, height, width]
        joint = torch.cat([input_, prev_state], dim=1)
        update = torch.sigmoid(self.update_gate(joint))
        reset = torch.sigmoid(self.reset_gate(joint))
        candidate = torch.tanh(self.out_gate(torch.cat([input_, prev_state * reset], dim=1)))
        return prev_state * (1 - update) + candidate * update
class ConvGRU(nn.Module):
    """Multi-layer convolutional GRU. Spatial dimensions are preserved by
    each cell (unless a cell upsamples); only the channel depth changes per
    layer."""

    def __init__(self, input_size, hidden_sizes, kernel_sizes, n_layers, upsampling: list = None):
        """
        input_size: channel depth of the input tensor.
        hidden_sizes: int or per-layer list of hidden channel depths.
        kernel_sizes: int or per-layer list of conv kernel sizes.
        n_layers: number of stacked ConvGRUCells.
        upsampling: optional per-layer upsample flags (default: none).
        """
        super(ConvGRU, self).__init__()
        if upsampling is None:
            upsampling = [False] * n_layers
        self.input_size = input_size
        # broadcast scalar hyperparameters to one entry per layer
        if type(hidden_sizes) is not list:
            self.hidden_sizes = [hidden_sizes] * n_layers
        else:
            assert len(hidden_sizes) == n_layers, '`hidden_sizes` must have the same length as n_layers'
            self.hidden_sizes = hidden_sizes
        if type(kernel_sizes) is not list:
            self.kernel_sizes = [kernel_sizes] * n_layers
        else:
            assert len(kernel_sizes) == n_layers, '`kernel_sizes` must have the same length as n_layers'
            self.kernel_sizes = kernel_sizes
        self.n_layers = n_layers
        cells = [
            ConvGRUCell(
                self.input_size if i == 0 else self.hidden_sizes[i - 1],
                self.hidden_sizes[i],
                self.kernel_sizes[i],
                upsample=upsampling[i],
            )
            for i in range(self.n_layers)
        ]
        # Sequential is only used as a registering container here; cells are
        # invoked individually in forward()
        self.cells = nn.Sequential(*cells)

    def forward(self, x, hidden=None):
        """Run one step through all layers.

        x: (batch, channels, height, width) input.
        hidden: optional list of per-layer previous states (None entries ok).
        Returns the list of updated per-layer hidden states.
        """
        if hidden is None:
            hidden = [None] * self.n_layers
        upd_hidden = []
        current = x
        for cell, prev in zip(self.cells, hidden):
            current = cell(current, prev)
            upd_hidden.append(current)
        # tensors are returned in a list to allow differing hidden sizes
        return upd_hidden
# taken from official NVLabs implementation
# Creates SPADE normalization layer based on the given configuration
# SPADE consists of two steps. First, it normalizes the activations using
# your favorite normalization method, such as Batch Norm or Instance Norm.
# Second, it applies scale and bias to the normalized output, conditioned on
# the segmentation map.
# The format of |config_text| is spade(norm)(ks), where
# (norm) specifies the type of parameter-free normalization.
# (e.g. syncbatch, batch, instance)
# (ks) specifies the size of kernel in the SPADE module (e.g. 3x3)
# Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5.
# Also, the other arguments are
# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
class SPADE(nn.Module):
    """Spatially-adaptive denormalization (NVLabs SPADE).

    The input is first normalized parameter-free (instance or batch norm),
    then rescaled and shifted by gamma/beta maps predicted from the
    segmentation map: out = normalized * (1 + gamma) + beta.
    """

    def __init__(self, norm_nc, label_nc, config):
        super().__init__()
        base_norm = config.get("base_norm_spade", "instance")
        ks = 3
        pw = ks // 2
        if base_norm == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif base_norm == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % base_norm)
        # intermediate embedding width; hardcoded as in the reference code
        nhidden = 128
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.ReLU()
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)

    def forward(self, x, segmap):
        # step 1: parameter-free normalization of the activations
        normalized = self.param_free_norm(x)
        # step 2: scale/bias maps conditioned on the (resized) semantic map
        seg = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        shared = self.mlp_shared(seg)
        gamma = self.mlp_gamma(shared)
        beta = self.mlp_beta(shared)
        return normalized * (1 + gamma) + beta
interactive-image2video-synthesis | interactive-image2video-synthesis-main/experiments/experiment.py | from abc import abstractmethod
import torch
import wandb
import os
from os import path
from glob import glob
import numpy as np
from utils.general import get_logger
WANDB_DISABLE_CODE = True
class Experiment:
    def __init__(self, config:dict, dirs: dict, device):
        """Set up logging, device selection, RNG seeding and (in train mode)
        the wandb run for an experiment.

        config: full experiment configuration (must contain a "general" section).
        dirs: output directories; "log" is used for the wandb dir, "ckpt" by
            checkpoint loading.
        device: a torch device, or a list of devices for multi-GPU runs.
        """
        # a list of devices signals a multi-GPU (parallel) run
        self.parallel = isinstance(device, list)
        self.config = config
        self.logger = get_logger(self.config["general"]["project_name"])
        self.is_debug = self.config["general"]["debug"]
        if self.is_debug:
            self.logger.info("Running in debug mode")
        if self.parallel:
            # primary device is the first listed GPU (CPU fallback)
            self.device = torch.device(
                f"cuda:{device[0]}" if torch.cuda.is_available() else "cpu"
            )
            self.all_devices = device
            self.logger.info("Running experiment on multiple gpus!")
        else:
            self.device = device
            self.all_devices = [device]
        self.dirs = dirs
        if torch.cuda.is_available():
            # restrict visible GPUs to the ones selected for this run
            os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(dev.index if self.parallel else dev) for dev in self.all_devices])
        if self.config["general"]["restart"]:
            self.logger.info(f'Resume training run with name "{self.config["general"]["project_name"]}" on device(s) {self.all_devices}')
        else:
            self.logger.info(f'Start new training run with name "{self.config["general"]["project_name"]}" on device(s) {self.all_devices}')
        ########## seed setting ##########
        torch.manual_seed(self.config["general"]["seed"])
        torch.cuda.manual_seed(self.config["general"]["seed"])
        np.random.seed(self.config["general"]["seed"])
        # random.seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        # NOTE(review): torch.manual_seed is called a second time here and
        # `rng` below is never used in this method — presumably leftovers
        torch.manual_seed(self.config["general"]["seed"])
        rng = np.random.RandomState(self.config["general"]["seed"])
        if self.config["general"]["mode"] == "train":
            # experiment tracking only in train mode
            project = "visual_poking_unsupervised"
            wandb.init(
                dir=self.dirs["log"],
                project=project,
                name=self.config["general"]["project_name"],
                group=self.config["general"]["experiment"],
            )
            # log parameters
            self.logger.info("Training parameters:")
            for key in self.config:
                if key != "testing":
                    self.logger.info(f"{key}: {self.config[key]}") # print to console
                    wandb.config.update({key: self.config[key]}) # update wandb config
def _load_ckpt(self, key, dir=None,name=None, single_opt = True, use_best=False, load_name = "model"):
if dir is None:
dir = self.dirs["ckpt"]
if name is None:
if len(os.listdir(dir)) > 0:
ckpts = glob(path.join(dir,"*.pt"))
# load latest stored checkpoint
ckpts = [ckpt for ckpt in ckpts if key in ckpt.split("/")[-1]]
if len(ckpts) == 0:
self.logger.info(f"*************No ckpt found****************")
op_ckpt = mod_ckpt = None
return mod_ckpt, op_ckpt
if use_best:
ckpts = [x for x in glob(path.join(dir,"*.pt")) if "=" in x.split("/")[-1]]
ckpts = {float(x.split("=")[-1].split(".")[0]): x for x in ckpts}
ckpt = torch.load(
ckpts[max(list(ckpts.keys()))], map_location="cpu"
)
else:
ckpts = {float(x.split("_")[-1].split(".")[0]): x for x in ckpts}
ckpt = torch.load(
ckpts[max(list(ckpts.keys()))], map_location="cpu"
)
mod_ckpt = ckpt[load_name] if load_name in ckpt else None
if single_opt:
key = [key for key in ckpt if key.startswith("optimizer")]
assert len(key) == 1
key = key[0]
op_ckpt = ckpt[key]
else:
op_ckpt = {key: ckpt[key] for key in ckpt if "optimizer" in key}
msg = "best model" if use_best else "model"
if mod_ckpt is not None:
self.logger.info(f"*************Restored {msg} with key {key} from checkpoint****************")
else:
self.logger.info(f"*************No ckpt for {msg} with key {key} found, not restoring...****************")
if op_ckpt is not None:
self.logger.info(f"*************Restored optimizer with key {key} from checkpoint****************")
else:
self.logger.info(f"*************No ckpt for optimizer with key {key} found, not restoring...****************")
else:
mod_ckpt = op_ckpt = None
return mod_ckpt, op_ckpt
else:
# fixme add checkpoint loading for best performing models
ckpt_path = path.join(dir,name)
if not path.isfile(ckpt_path):
self.logger.info(f"*************No ckpt for model and optimizer found under {ckpt_path}, not restoring...****************")
mod_ckpt = op_ckpt = None
else:
if "epoch_ckpts" in ckpt_path:
mod_ckpt = torch.load(
ckpt_path, map_location="cpu"
)
op_path = ckpt_path.replace("model@","opt@")
op_ckpt = torch.load(op_path,map_location="cpu")
return mod_ckpt,op_ckpt
ckpt = torch.load(ckpt_path, map_location="cpu")
mod_ckpt = ckpt[load_name] if load_name in ckpt else None
op_ckpt = ckpt["optimizer"] if "optimizer" in ckpt else None
if mod_ckpt is not None:
self.logger.info(f"*************Restored model under {ckpt_path} ****************")
else:
self.logger.info(f"*************No ckpt for model found under {ckpt_path}, not restoring...****************")
if op_ckpt is not None:
self.logger.info(f"*************Restored optimizer under {ckpt_path}****************")
else:
self.logger.info(f"*************No ckpt for optimizer found under {ckpt_path}, not restoring...****************")
return mod_ckpt,op_ckpt
@abstractmethod
def train(self):
"""
Here, the experiment shall be run
:return:
"""
pass
@abstractmethod
def test(self):
"""
Here the prediction shall be run
:param ckpt_path: The path where the checkpoint file to load can be found
:return:
"""
pass
| 6,865 | 40.361446 | 140 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/experiments/fixed_length_model.py | import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.optim import Adam
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import Average, MetricUsage
import numpy as np
import wandb
from functools import partial
from lpips import LPIPS
from tqdm import tqdm
from experiments.experiment import Experiment
from data import get_dataset
from data.samplers import FixedLengthSampler
from models.latent_flow_net import SingleScaleBaseline,SkipSequenceModel
from models.discriminator import GANTrainer
from utils.losses import PerceptualVGG,vgg_loss_agg,DynamicsLoss, style_loss
from utils.testing import make_flow_grid, make_img_grid, make_video, make_plot
from utils.metrics import metric_fid, FIDInceptionModel, metric_lpips, psnr_lightning, ssim_lightning
from utils.general import linear_var, get_member, get_patches
class FixedLengthModel(Experiment):
def __init__(self, config, dirs, device):
super().__init__(config, dirs, device)
self.datakeys = ["images","poke"]
if self.config["architecture"]["disentanglement"]:
self.datakeys.append("img_aT")
self.datakeys.append("app_img_random")
# used for efficient metrics computation
self.fid_feats_real_per_frame = {}
self.fid_feats_fake_per_frame = {}
self.psnrs = {"t": [], "tk": [], "pl" : []}
self.ssims = {"t": [], "tk": [], "pl" : []}
self.lpips = {"t": [], "tk": []}
self.use_gan = self.config["gan"]["use"]
self.use_temp_disc = self.config["gan_temp"]["use"]
if self.use_temp_disc:
if not self.config["gan_temp"]["patch_temp_disc"]:
assert not self.config["gan_temp"]["conditional"]
#self.pixel_decoder_loss = self.config["training"]["pixel_dynamics_weight"] > 0
self.lr_dec_t = 0
self.target_dev = None
# metrics for each frame
self.ssims_per_frame = {}
self.lpips_per_frame = {}
self.psnrs_per_frame = {}
# self.ssims_per_frame_pl = {}
# self.psnrs_per_frame_pl = {}
self.lpips_avg = None
self.custom_sampler = self.config["training"]["custom_sampler"] if "custom_sampler" in self.config["training"] else False
self.poke_jump = self.config["training"]["poke_jump"] if "poke_jump" in self.config["training"] else False
self.poke_scale_mode = self.config["architecture"]["poke_scale"] if "poke_scale" in self.config["architecture"] else False
if self.poke_jump:
assert not self.poke_scale_mode
def __clear_metric_arrs(self):
[self.psnrs[key].clear() for key in self.psnrs]
[self.ssims[key].clear() for key in self.ssims]
[self.lpips[key].clear() for key in self.lpips]
self.lpips_per_frame = {}
self.psnrs_per_frame = {}
self.ssims_per_frame = {}
self.fid_feats_real_per_frame = {}
self.fid_feats_fake_per_frame = {}
# self.ssims_per_frame_pl = {}
# self.psnrs_per_frame_pl = {}
def train(self):
########## checkpoints ##########
if self.config["general"]["restart"] and not self.is_debug:
mod_ckpt, op_ckpts = self._load_ckpt("reg_ckpt", single_opt=False)
op_ckpt_dis = op_ckpts["optimizer_dis"]
op_ckpt_dyn = op_ckpts["optimizer_dyn"]
else:
mod_ckpt = op_ckpt_dis = op_ckpt_dyn = None
# get datasets for training and testing
def w_init_fn(worker_id):
return np.random.seed(np.random.get_state()[1][0] + worker_id)
if not self.poke_scale_mode:
del self.config["data"]["n_ref_frames"]
dataset, transforms = get_dataset(config=self.config["data"])
train_dataset = dataset(transforms, self.datakeys, self.config["data"], train=True)
test_datakeys = self.datakeys + ["app_img_random"] if self.config["testing"]["eval_app_transfer"] and "app_img_random" not in self.datakeys else self.datakeys
test_datakeys.append("flow")
test_dataset = dataset(transforms, test_datakeys, self.config["data"], train=False)
if self.custom_sampler:
train_sampler = FixedLengthSampler(train_dataset, self.config["training"]["batch_size"],shuffle=True,
weighting=train_dataset.obj_weighting,drop_last=True,zero_poke=True, zero_poke_amount=self.config["training"]["zeropoke_amount"])
train_loader = DataLoader(train_dataset, batch_sampler=train_sampler,num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,)
test_sampler = FixedLengthSampler(test_dataset, batch_size=self.config["training"]["batch_size"], shuffle=True,
drop_last=True, weighting=test_dataset.obj_weighting,zero_poke=True,zero_poke_amount=self.config["training"]["zeropoke_amount"])
test_loader = DataLoader(
test_dataset,
batch_sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"], #
worker_init_fn=w_init_fn,
)
eval_sampler = FixedLengthSampler(test_dataset,batch_size=self.config["testing"]["test_batch_size"],shuffle=True,
drop_last=True,weighting=test_dataset.obj_weighting,zero_poke=False)
eval_loader = DataLoader(test_dataset,
batch_sampler=eval_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,)
self.logger.info("Using custom fixed length sampler.")
else:
self.logger.info("Using standard pytorch random sampler")
train_sampler = RandomSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=self.config["training"]["batch_size"],
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True
)
test_sampler = RandomSampler(test_dataset,)
test_loader = DataLoader(
test_dataset,
batch_size=self.config["training"]["batch_size"],
sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True
)
# no zeropoke for evaluation as zeropoke is only to ensure no reaction when poking outside
eval_sampler = SequentialSampler(test_dataset,)
eval_loader = DataLoader(test_dataset,
sampler=eval_sampler,
batch_size=self.config["testing"]["test_batch_size"],
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn,
drop_last=True)
# define model
self.logger.info(f"Load model...")
#net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else ResidualSequenceBaseline
net = SkipSequenceModel(spatial_size=self.config["data"]["spatial_size"],config=self.config["architecture"]) if self.config["architecture"]["use_skip_model"] else \
SingleScaleBaseline(spatial_size=self.config["data"]["spatial_size"],
config=self.config["architecture"], )
self.logger.info(
f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
)
if self.config["general"]["restart"] and mod_ckpt is not None:
self.logger.info("Load pretrained paramaters and resume training.")
net.load_state_dict(mod_ckpt)
if self.parallel:
net = torch.nn.DataParallel(net, device_ids=self.all_devices)
net.cuda(self.all_devices[0])
self.logger.info("Model on gpu!")
# log weights and gradients
wandb.watch(net, log="all")
# define optimizers
# appearance and shape disentanglement
dis_params = [{"params": get_member(net,"shape_enc").parameters(), "name": "shape_encoder"},
{"params": get_member(net,"dec").parameters(), "name": "decoder"}
]
optimizer_dis = Adam(dis_params, lr=self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dis is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dis.load_state_dict(op_ckpt_dis)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dis = torch.optim.lr_scheduler.MultiStepLR(optimizer_dis, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# dynamics
dyn_params = [{"params": get_member(net,"dynamics_enc").parameters(), "name": "dynamics_encoder", },
{"params": get_member(net,"fusion_block").parameters(), "name": "fusion_block",},]
if self.config["training"]["decoder_update_tk"]:
dyn_params.append({"params": get_member(net,"dec").parameters(), "name": "decoder"})
if "singlestage" in self.config["training"] and self.config["training"]["singlestage"]:
dyn_params.append({"params": get_member(net, "shape_enc").parameters(), "name": "shape_encoder"})
optimizer_dyn = Adam(dyn_params, lr = self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dyn is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dyn.load_state_dict(op_ckpt_dyn)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dyn = torch.optim.lr_scheduler.MultiStepLR(optimizer_dyn, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# initialize disc if gan mode is enabled
if self.use_gan:
gan_trainer = GANTrainer(self.config, self._load_ckpt, self.logger,spatial_size=self.config["data"]["spatial_size"][0] ,
parallel=self.parallel, devices=self.all_devices, debug=self.is_debug)
if self.use_temp_disc:
gan_trainer_temp = GANTrainer(self.config, self._load_ckpt,self.logger,spatial_size=self.config["data"]["spatial_size"][0],
parallel=self.parallel,devices=self.all_devices, debug=self.is_debug,temporal=True, sequence_length=train_dataset.max_frames)
# set start iteration and epoch in case model training is resumed
start_it = 0
start_epoch = 0
n_epoch_train = self.config["training"]["n_epochs"]
if self.config["general"]["restart"] and op_ckpts is not None:
start_it = list(optimizer_dis.state_dict()["state"].values())[-1]["step"]
start_epoch = int(np.floor(start_it / len(train_loader)))
assert self.config["training"]["n_epochs"] > start_epoch
n_epoch_train = self.config["training"]["n_epochs"] - start_epoch
#
lr_dec_rec = partial(linear_var,start_it=0,
end_it=self.config["training"]["lr_dec_end_it"],
start_val=self.config["training"]["lr"],
end_val=self.config["training"]["lr_dec_end_val"],
clip_min=0,
clip_max=self.config["training"]["lr"],)
self.lr_dec_t = lr_dec_rec(start_it)
# losses
self.logger.info("Load VGG")
self.vgg = PerceptualVGG()
if self.parallel:
self.vgg = torch.nn.DataParallel(self.vgg,device_ids=self.all_devices)
self.vgg.cuda(self.all_devices[0])
self.logger.info("VGG on gpu")
# from torchsummary import summary
# summary(vgg.vgg,(3,224,224))
self.logger.info("Initialize persistent losses")
latent_dynamics_loss = DynamicsLoss(config=self.config["training"])
self.logger.info("Finished initializing persistent losses.")
def train_step(engine,batch):
net.train()
# prepare data
weights=None
loss_dis = 0
out_dict = {}
if train_dataset.flow_weights:
poke = batch["poke"][0].cuda(self.all_devices[0])
weights = batch["poke"][1].cuda(self.all_devices[0])
else:
poke = batch["poke"].cuda(self.all_devices[0])
x_t = batch["images"][:, 0].cuda(self.all_devices[0])
x_seq = batch["images"][:, 1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
shape_img = batch["img_aT"].cuda(self.all_devices[0])
# apply style loss
app_img_tr = batch["app_img_random"].cuda(self.all_devices[0])
x_trans, *_ = net(app_img_tr,x_t,poke,len=0)
loss_style = style_loss(self.vgg,app_img_tr,x_trans)
loss_dis = self.config["training"]["style_loss_weight"] * loss_style
out_dict.update({"style_loss": loss_style.item()})
else:
shape_img = x_t
x_t_hat_i, sigma_t, _ , alpha = net(x_seq[:,-1],shape_img,poke,len=0)
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
# static loss to obtain fixed image state space
if "singlestage" not in self.config["training"] or not self.config["training"]["singlestage"]:
loss_dis = loss_dis + vgg_loss_agg(self.vgg, x_t, x_t_hat_i)
#optimize parameter of appearance, shape encoders and decoder
optimizer_dis.zero_grad()
loss_dis.backward()
optimizer_dis.step()
out_dict.update({"loss_dis" : loss_dis.item()})
#optimize in alternating gradient descent as this results in equal results than training the static/dynamic model in two completely seperate stages
# however, it performs significantly better than training both models jointly with a single optimizer step (see ablations or run the model with 'singlestage' set to true)
# forward pass for training of dynamics part of the model
# dynamics losses
seq_len = x_seq.shape[1]
seq_rec, mu_delta, sigmas_hat, logstd_delta = net(x_t,shape_img,poke,len=seq_len,
poke_linear=self.poke_scale_mode,
n_zero_frames=seq_len-n_ref_frames-1, poke_jump=self.poke_jump)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
if weights is not None:
seq_rec = get_patches(seq_rec,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
x_seq = get_patches(x_seq,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
for n in range(seq_len):
x_hat_tn,s_tn,*_ = net(x_seq[:,n],x_seq[:,n],poke,len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
w = 1. if n != n_ref_frames else self.config["training"]["target_weight"]
ll_dyn_n =w * vgg_loss_agg(self.vgg,x_seq[:,n],seq_rec[:,n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_dyn = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
#latent dynamics
dyn_losses = []
for s_tk,s_hat_tk in zip(sigmas_gt,sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk,s_tk,[]))
latent_loss_dyn = torch.stack(dyn_losses).mean()
loss_dyn = self.config["training"]["vgg_dyn_weight"] * ll_loss_dyn + self.config["training"]["latent_dynamics_weight"] * latent_loss_dyn
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
if self.config["gan"]["pixel_dynamics"]:
offsets = np.random.choice(np.arange(max(1,x_seq.shape[1]-train_dataset.max_frames)),size=x_seq.shape[0])
true_exmpls = torch.stack([seq[o:o+train_dataset.max_frames] for seq, o in zip(x_seq,offsets)],dim=0)
fake_exmpls = torch.stack([seq[o:o+train_dataset.max_frames] for seq, o in zip(seq_rec, offsets)], dim=0)
x_true = torch.cat([true_exmpls[:,1:],true_exmpls[:,:-1]],dim=2).reshape(-1,2*true_exmpls.shape[2],*true_exmpls.shape[3:])
x_fake = torch.cat([fake_exmpls[:, 1:], true_exmpls[:, :-1]], dim=2).reshape(-1, 2 * fake_exmpls.shape[2], *fake_exmpls.shape[3:])
else:
true_exmpls = np.random.choice(np.arange(x_seq.shape[0]*x_seq.shape[1]),self.config["gan"]["n_examples"])
fake_exmpls = np.random.choice(np.arange(seq_rec.shape[0]*seq_rec.shape[1]), self.config["gan"]["n_examples"])
x_true = x_seq.view(-1,*x_seq.shape[2:])[true_exmpls]
x_fake = seq_rec.view(-1,*seq_rec.shape[2:])[fake_exmpls]
disc_dict, loss_gen, loss_fmap = gan_trainer.train_step(x_true, x_fake)
loss_dyn = loss_dyn + self.config["gan"]["gen_weight"] * loss_gen + self.config["gan"]["fmap_weight"] * loss_fmap
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
seq_len_act = x_seq.shape[1]
offset = int(np.random.choice(np.arange(max(1,seq_len_act-train_dataset.max_frames)),1))
# offset_fake = int(np.random.choice(np.arange(max(1,seq_len_act-seq_len_temp_disc)), 1))
x_fake_tmp = seq_rec[:,offset:offset+train_dataset.max_frames].permute(0,2,1,3,4)
x_true_tmp = x_seq[:, offset:offset+train_dataset.max_frames].permute(0,2,1,3,4)
if self.config["gan_temp"]["conditional"]:
cond = get_patches(poke,weights,self.config["data"],test_dataset.weight_value_flow,self.logger) if test_dataset.flow_weights else poke
else:
cond = None
disc_dict_temp, loss_gen_temp, loss_fmap_temp = gan_trainer_temp.train_step(x_true_tmp,x_fake_tmp,cond)
loss_dyn = loss_dyn + self.config["gan_temp"]["gen_weight"] * loss_gen_temp + self.config["gan_temp"]["fmap_weight"] * loss_fmap_temp
# optimize parameters of dynamics part
optimizer_dyn.zero_grad()
loss_dyn.backward()
optimizer_dyn.step()
out_dict.update({"loss_dyn":loss_dyn.item() ,"vgg_loss_dyn" : ll_loss_dyn.item(), "latent_loss_dyn": latent_loss_dyn.item(), "lr_dec_t": self.lr_dec_t})
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
out_dict.update(disc_dict)
out_dict.update({"loss_gen_patch" :loss_gen.item(), "loss_fmap_patch": loss_fmap.item()})
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
out_dict.update(disc_dict_temp)
out_dict.update({"loss_gen_temp" :loss_gen_temp.item(), "loss_fmap_temp": loss_fmap_temp.item()})
return out_dict
self.logger.info("Initialize inception model...")
self.inception_model = FIDInceptionModel()
self.logger.info("Finished initialization of inception model...")
# note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
self.lpips_fn = LPIPS(net="vgg")
def eval_step(engine, eval_batch):
net.eval()
out_dict = {}
with torch.no_grad():
# prepare data
weights = None
if test_dataset.flow_weights:
poke = eval_batch["poke"][0].cuda(self.all_devices[0])
weights = eval_batch["poke"][1].cuda(self.all_devices[0])
else:
poke = eval_batch["poke"].cuda(self.all_devices[0])
x_t = eval_batch["images"][:,0].cuda(self.all_devices[0])
x_seq_gt = eval_batch["images"][:,1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
app_img_tr = eval_batch["app_img_random"].cuda(self.all_devices[0])
x_trans, *_ = net(app_img_tr, x_t, poke,len=0)
loss_style = style_loss(self.vgg, app_img_tr, x_trans)
out_dict.update({"style_loss_eval": loss_style.item()})
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
# eval forward passes
seq_len = x_seq_gt.shape[1]
x_t_hat, sigma_t, _, alpha = net(x_t,x_t,poke,len=0)
x_seq_hat, _, sigmas_hat,_ = net(x_t, x_t, poke,len=seq_len,poke_linear=self.poke_scale_mode,
n_zero_frames=seq_len-n_ref_frames-1,poke_jump=self.poke_jump)
if weights is not None and self.config["testing"]["metrics_on_patches"]:
x_seq_hat = get_patches(x_seq_hat,weights,self.config["data"],test_dataset.weight_value_flow, logger=self.logger)
x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
for n in range(seq_len):
x_hat_tn, s_tn, *_ = net(x_seq_gt[:, n], x_seq_gt[:, n], poke, len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
ll_dyn_n = vgg_loss_agg(self.vgg, x_seq_gt[:, n], x_seq_hat[:, n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_tk_eval = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
if weights is not None and self.config["testing"]["metrics_on_patches"]:
rec_imgs = get_patches(rec_imgs, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
# apply inception model for fid calculation at all timesteps
for t in range(x_seq_gt.shape[1]):
real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
if t not in self.fid_feats_fake_per_frame:
self.fid_feats_fake_per_frame.update({t: fake_features_t})
self.fid_feats_real_per_frame.update({t: real_features_t})
else:
self.fid_feats_fake_per_frame[t] = np.concatenate([self.fid_feats_fake_per_frame[t], fake_features_t], axis=0)
self.fid_feats_real_per_frame[t] = np.concatenate([self.fid_feats_real_per_frame[t], real_features_t], axis=0)
# evaluate training losses
# ll_loss_tk_eval = vgg_loss_agg(self.vgg, x_tk, x_tk_hat)
ll_loss_t_i_eval = vgg_loss_agg(self.vgg, x_t, x_t_hat)
dyn_losses = []
for s_tk, s_hat_tk in zip(sigmas_gt, sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk, s_tk, []))
latent_loss_dyn_eval = torch.stack(dyn_losses).mean()
out_dict.update({"vgg_loss_dyn_eval": ll_loss_tk_eval.item(), "loss_dis_i_eval": ll_loss_t_i_eval.item(), "latent_loss_dyn_eval": latent_loss_dyn_eval.item()})
# compute metrics
ssim_t = ssim_lightning(x_t, x_t_hat)
psnr_t = psnr_lightning(x_t, x_t_hat)
lpips_t = metric_lpips(x_t,x_t_hat, self.lpips_fn, reduce=False)
ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat,self.lpips_fn,reduce=False,return_per_frame=True)
# ssim_pl, ssim_pl_per_frame = ssim_lightning(x_seq_gt,x_seq_hat,return_per_frame=True)
# psnr_pl, psnr_pl_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
# append to arrays
self.lpips["t"].append(lpips_t)
self.psnrs["t"].append(psnr_t)
self.ssims["t"].append(ssim_t)
self.psnrs["tk"].append(psnr_tk)
self.ssims["tk"].append(ssim_tk)
self.lpips["tk"].append(lpips_avg)
#self.ssims["pl"].append(ssim_pl)
#self.psnrs["pl"].append(psnr_pl)
# append the values of the respective sequence length
[self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key:[ssim_per_frame[key]]}) for key in ssim_per_frame]
[self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key:[psnr_per_frame[key]]}) for key in psnr_per_frame]
[self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key:[lpips_per_frame[key]]}) for key in lpips_per_frame]
#[self.ssims_per_frame_pl[key].append(ssim_pl_per_frame[key]) if key in self.ssims_per_frame_pl else self.ssims_per_frame_pl.update({key: [ssim_pl_per_frame[key]]}) for key in ssim_pl_per_frame]
#[self.psnrs_per_frame_pl[key].append(psnr_pl_per_frame[key]) if key in self.psnrs_per_frame_pl else self.psnrs_per_frame_pl.update({key: [psnr_pl_per_frame[key]]}) for key in psnr_pl_per_frame]
return out_dict
# test_it steps are performed while generating test_imgs, there n_test_img is overall number divided by number of test iterations
n_test_img = int(self.config["testing"]["n_test_img"] // self.config["testing"]["test_it"])
def eval_visual(engine, eval_batch):
net.eval()
with torch.no_grad():
# prepare data
if test_dataset.flow_weights:
poke = eval_batch["poke"][0].cuda(self.all_devices[0])
weights = eval_batch["poke"][1]
else:
poke = eval_batch["poke"].cuda(self.all_devices[0])
x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
flow = eval_batch["flow"]
if self.config["architecture"]["disentanglement"]:
shape_img = eval_batch["img_aT"].cuda(self.all_devices[0])
else:
shape_img = x_t
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
seq_len = x_seq_gt.shape[1]
x_seq_hat, *_ = net(x_t,x_t, poke, len=seq_len,poke_linear=self.poke_scale_mode,n_zero_frames=seq_len-n_ref_frames-1, poke_jump = self.poke_jump)
x_t_hat , *_ = net(x_seq_gt[:,-1],shape_img,poke,len=0)
grid_dis = make_img_grid(x_seq_gt[:,-1],shape_img, x_t_hat,x_t, n_logged=n_test_img)
grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img, flow=flow)
seq_vis_hat = torch.cat([x_t.unsqueeze(1), x_seq_hat], 1)
seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img,flow=flow, display_frame_nr=True)
it = engine.state.iteration
log_dict = {"Last Frame Comparison Test data": wandb.Image(grid_dyn, caption=f"Last frames test grid #{it}."),
"Disentanglement Grid Test Data": wandb.Image(grid_dis, caption=f"Test grid disentanglement #{it}."),
"Video Grid Test Data": wandb.Video(grid_anim,caption=f"Test Video Grid #{it}.",fps=5)}
if self.config["testing"]["eval_app_transfer"]:
app_img_unrelated = eval_batch["app_img_random"].cuda(self.all_devices[0])
x_transferred, *_ = net(app_img_unrelated,x_t, poke,len=0)
transfer_grid = make_img_grid(app_img_unrelated,x_t,x_transferred)
log_dict.update({"Appearance transfer grid Test Data": wandb.Image(transfer_grid, caption=f"Test_grid appearance transfer #{it}")})
wandb.log(log_dict)
return None
self.logger.info("Initialize engines...")
trainer = Engine(train_step)
evaluator = Engine(eval_step)
test_img_generator = Engine(eval_visual)
self.logger.info("Finish engine initialization...")
# checkpointing
self.logger.info("Add checkpointing and pbar...")
n_saved = 10
self.logger.info(f"Checkpoint saving window is {n_saved}")
ckpt_handler = ModelCheckpoint(self.dirs["ckpt"], "reg_ckpt", n_saved=n_saved, require_empty=False)
save_dict = {"model": net, "optimizer_dis": optimizer_dis, "optimizer_dyn": optimizer_dyn}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler,
save_dict)
if self.use_gan:
ckpt_handler_disc = ModelCheckpoint(self.dirs["ckpt"], gan_trainer.load_key, n_saved=10, require_empty=False)
save_dict_disc = {"model": gan_trainer.disc, "optimizer": gan_trainer.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc,
save_dict_disc)
if self.use_temp_disc:
ckpt_handler_disc_temp = ModelCheckpoint(self.dirs["ckpt"], gan_trainer_temp.load_key, n_saved=10, require_empty=False)
save_dict_disc_temp = {"model": gan_trainer_temp.disc, "optimizer": gan_trainer_temp.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc_temp,
save_dict_disc_temp)
pbar = ProgressBar(ascii=True)
pbar.attach(trainer, output_transform=lambda x: x)
pbar.attach(evaluator, output_transform=lambda x: x)
#reduce the learning rate of the decoder for the image reconstruction task, such that the model focusses more on t --> tk
@trainer.on(Events.ITERATION_COMPLETED)
def update_lr(engine):
self.lr_dec_t = lr_dec_rec(engine.state.iteration)
for g in optimizer_dis.param_groups:
if g["name"] == "decoder":
g["lr"] = self.lr_dec_t
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["log_intervall"]))
def log(engine):
it = engine.state.iteration
wandb.log({"iteration": it})
# log losses
for key in engine.state.output:
wandb.log({key: engine.state.output[key]})
data = engine.state.batch
if test_dataset.flow_weights:
poke = data["poke"][0].cuda(self.all_devices[0])
else:
poke = data["poke"].cuda(self.all_devices[0])
x_t = data["images"][:, 0].cuda(self.all_devices[0])
x_seq_gt = data["images"][:, 1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
shape_img = data["img_aT"].cuda(self.all_devices[0])
else:
shape_img = x_t
n_ref_frames = self.config["data"]["n_ref_frames"] - 1 if self.poke_scale_mode else train_dataset.max_frames -1
net.eval()
seq_len = x_seq_gt.shape[1]
with torch.no_grad():
x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len, poke_linear=self.poke_scale_mode, n_zero_frames=seq_len-n_ref_frames-1, poke_jump=self.poke_jump)
x_t_hat, *_ = net(x_seq_gt[:,-1], shape_img, poke,len=0)
#x_t_hat_e, *_ = net(img_aT, img_sT, poke)
grid_dis_i = make_img_grid(x_seq_gt[:,-1], shape_img, x_t_hat, x_t, n_logged=n_test_img)
grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img)
seq_vis_hat = torch.cat([x_t.unsqueeze(1),x_seq_hat],1)
seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img, display_frame_nr=True)
wandb.log({"Last Frame Comparison Train Data": wandb.Image(grid_dyn, caption=f"Last frames train grid after {it} train steps."),
"Disentanglement Grid Invariance Train Data": wandb.Image(grid_dis_i, caption=f"Invariance Disentanglement Grid on train set after {it} train steps."),
"Video Grid Train Data": wandb.Video(grid_anim, caption=f"Train Video Grid after {it} train steps",fps=5)})
#"Disentanglement Grid Equivariance Train Data": wandb.Image(grid_dis_e, caption=f"Eqiuvariance Disentanglement Grid on train set after {it} train steps.")
self.logger.info("Initialize metrics...")
# compute loss average over epochs
# Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
if "singlestage" not in self.config["training"] or not self.config["training"]["singlestage"]:
Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["loss_dis_i_eval"]).attach(evaluator, "loss_dis_i_eval")
Average(output_transform=lambda x: x["vgg_loss_dyn"]).attach(trainer, "vgg_loss_dyn-epoch_avg")
Average(output_transform=lambda x: x["latent_loss_dyn"]).attach(trainer, "latent_loss_dyn-epoch_avg")
if "disentanglement" in self.config["architecture"] and self.config["architecture"]["disentanglement"]:
Average(output_transform=lambda x: x["style_loss"]).attach(trainer, "style_loss-epoch_avg")
Average(output_transform=lambda x: x["style_loss_eval"]).attach(evaluator, "style_loss_eval")
if self.use_temp_disc or self.use_gan:
def gan_training_started(engine,epoch, key="gan"):
return engine.state.iteration >= self.config[key]["start_iteration"]
if self.use_gan:
use_patchgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=gan_training_started),
completed=Events.EPOCH_COMPLETED(event_filter=gan_training_started),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=gan_training_started))
# gan losses
Average(output_transform=lambda x: x["loss_gen_patch"]).attach(trainer, "loss_gen_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_patch"]).attach(trainer, "loss_fmap_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_disc_patch"]).attach(trainer, "loss_disc_patch-epoch_avg",usage=use_patchgan_metrics)
#if self.config["gan"]["gp_weighflow_video_generatort"] > 0:
Average(output_transform=lambda x: x["loss_gp_patch"]).attach(trainer, "loss_gp_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_""true_patch"]).attach(trainer, "p_true_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_fake_patch"]).attach(trainer, "p_fake_patch-epoch_avg",usage=use_patchgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def gan_stuff(engine):
gan_trainer.disc_scheduler.step()
if self.use_temp_disc:
use_tmpgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=partial(gan_training_started,key="gan_temp")),
completed=Events.EPOCH_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")))
# gan losses
Average(output_transform=lambda x: x["loss_gen_temp"]).attach(trainer, "loss_gen_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_temp"]).attach(trainer, "loss_fmap_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_disc_temp"]).attach(trainer, "loss_disc_temp-epoch_avg",usage=use_tmpgan_metrics)
#if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_temp"]).attach(trainer, "loss_gp_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_true_temp"]).attach(trainer, "p_true_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_fake_temp"]).attach(trainer, "p_fake_temp-epoch_avg",usage=use_tmpgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def temp_disc_stuff(engine):
gan_trainer_temp.disc_scheduler.step()
# evaluation losses
Average(output_transform=lambda x: x["vgg_loss_dyn_eval"]).attach(evaluator, "vgg_loss_dyn_eval")
Average(output_transform=lambda x: x["latent_loss_dyn_eval"]).attach(evaluator, "latent_loss_dyn_eval")
self.logger.info("Finish metric initialization.")
@trainer.on(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]))
def metrics(engine):
# set incpetion model to cpu
self.inception_model.eval()
self.inception_model.cuda(self.all_devices[0])
self.lpips_fn.cuda(self.all_devices[0])
self.lpips_fn.eval()
if self.config["gan_temp"]["use"]:
gan_trainer_temp.disc.cpu()
if self.config["gan"]["use"]:
gan_trainer.disc.cpu()
# compute metrics over an epoch
self.logger.info(f"Computing metrics after epoch #{engine.state.epoch}")
batch_size = eval_sampler.batch_size if self.config["training"]["custom_sampler"] else eval_loader.batch_size
bs = 20 if self.is_debug else (int(8000 / batch_size) if len(test_dataset) > 8000 else len(eval_loader))
evaluator.run(eval_loader, max_epochs=1, epoch_length=bs)
[wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
# compute metrics
test = np.stack(self.ssims["t"], axis=0)
ssim_t = np.mean(np.stack(self.ssims["t"], axis=0))
psnr_t = np.mean(np.stack(self.psnrs["t"], axis=0))
lpips_t = np.mean(np.concatenate(self.lpips["t"], axis=0))
ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
self.lpips_avg = lpips_avg
fid_per_frame = {}
for key in tqdm(self.fid_feats_real_per_frame, desc="Computing FID per frame"):
fid_per_frame[key] = metric_fid(self.fid_feats_real_per_frame[key], self.fid_feats_fake_per_frame[key])
#fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
log_dict = {"ssim-t": ssim_t, "psnr-t": psnr_t, "lpips-t": lpips_t,"ssim-tk": ssim_tk, "psnr-tk": psnr_tk, "fid-tk": fid_avg, "lpips-avg": lpips_avg}
# add histograms for per-frame-metrics
self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
# self.ssims_per_frame_pl = {key: np.stack(self.ssims_per_frame_pl[key], axis=0).mean() for key in self.ssims_per_frame_pl}
# self.psnrs_per_frame_pl = {key: np.stack(self.psnrs_per_frame_pl[key], axis=0).mean() for key in self.psnrs_per_frame_pl}
x = [k + 1 for k in self.lpips_per_frame]
make_plot(x, list(self.lpips_per_frame.values()), "LPIPS of predicted frames", ylabel="Average LPIPS")
make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM")
make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR")
make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID")
wandb.log(log_dict)
# clear collection arrays
self.__clear_metric_arrs()
self.inception_model.cpu()
self.lpips_fn.cpu()
if self.config["gan_temp"]["use"]:
gan_trainer_temp.disc.cuda(self.all_devices[0])
if self.config["gan"]["use"]:
gan_trainer.disc.cuda(self.all_devices[0])
# set
#toggle_gpu(True)
        @trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["test_img_intervall"]))
        def make_test_grid(engine):
            # Periodically run the image-grid generator engine over a few test
            # batches so qualitative samples are produced during training.
            test_img_generator.run(test_loader, max_epochs=1, epoch_length=self.config["testing"]["test_it"])
@trainer.on(Events.EPOCH_COMPLETED)
def log_train_avg(engine):
wandb.log({"epoch": engine.state.epoch})
[wandb.log({key: engine.state.metrics[key]}) for key in engine.state.metrics]
# also perform scheduler step
scheduler_dis.step()
scheduler_dyn.step()
        def score_fn(engine):
            # Score for ModelCheckpoint: higher is better, so return negative
            # LPIPS (lower LPIPS == better reconstruction). Requires that the
            # metrics handler has already run and populated self.lpips_avg.
            assert self.lpips_avg is not None
            return -self.lpips_avg
# define best ckpt
best_ckpt_handler = ModelCheckpoint(self.dirs["ckpt"],filename_prefix="ckpt_metric" ,score_function=score_fn,score_name="lpips",n_saved=5,require_empty=False)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]),best_ckpt_handler,save_dict)
        @trainer.on(Events.STARTED)
        def set_start_it(engine):
            # When resuming from a checkpoint, fast-forward the engine state so
            # iteration/epoch counters (and event filters) continue where training stopped.
            self.logger.info(f'Engine starting from iteration {start_it}, epoch {start_epoch}')
            engine.state.iteration = start_it
            engine.state.epoch = start_epoch
# run everything
n_step_per_epoch = 10 if self.is_debug else len(train_loader)
self.logger.info("Start training...")
trainer.run(train_loader, max_epochs=n_epoch_train, epoch_length=n_step_per_epoch)
self.logger.info("End training.")
    def test(self):
        """Checkpoint evaluation entry point.

        Restores the model from the "reg_ckpt" checkpoint and, depending on
        config["testing"]["mode"], either
          * "metrics": computes SSIM / PSNR / LPIPS / FID (also per predicted
            frame index) over the test set and saves summary plots, or
          * "fvd": generates real/fake video batches, writes a few example
            mp4s and dumps the arrays for an external FVD computation.
        """
        # local imports: only needed for evaluation
        from tqdm import tqdm
        import cv2
        from os import makedirs,path
        # model weights only; optimizer state is not needed for evaluation
        mod_ckpt, _ = self._load_ckpt("reg_ckpt", single_opt=False)
        dataset, transforms = get_dataset(config=self.config["data"])
        test_dataset = dataset(transforms, self.datakeys, self.config["data"], train=False)
        # get datasets for training and testing
        def w_init_fn(worker_id):
            # per-worker numpy re-seeding so dataloader workers draw different samples
            return np.random.seed(np.random.get_state()[1][0] + worker_id)
        if self.custom_sampler:
            test_sampler = FixedLengthSampler(test_dataset, batch_size=self.config["testing"]["test_batch_size"], shuffle=True,
                                              drop_last=True, weighting=test_dataset.obj_weighting, zero_poke=False)
            test_loader = DataLoader(
                test_dataset,
                batch_sampler=test_sampler,
                num_workers=0 if self.is_debug else self.config["data"]["num_workers"], #
                worker_init_fn=w_init_fn,
            )
            self.logger.info("Using custom data sampler")
        else:
            test_sampler = RandomSampler(test_dataset, )
            test_loader = DataLoader(test_dataset,
                                     sampler=test_sampler,
                                     batch_size=16,
                                     num_workers=self.config["data"]["num_workers"],
                                     worker_init_fn=w_init_fn,
                                     drop_last=True)
            self.logger.info("Using common torch sampler")
        # define model
        self.logger.info(f"Sequence length is {test_dataset.max_frames}")
        self.logger.info(f"Load model...")
        net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else SingleScaleBaseline
        net = net_model(spatial_size=self.config["data"]["spatial_size"],
                        config=self.config["architecture"], )
        # NOTE(review): this literal is shadowed before any use below — looks dead; confirm
        weights = [5, 5, 0]
        self.logger.info(
            f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
        )
        net.load_state_dict(mod_ckpt)
        net.cuda(self.all_devices[0])
        self.logger.info("Model on gpu!")
        net.eval()
        if self.config["testing"]["mode"] == "metrics":
            # inception features per predicted frame index, accumulated over batches
            fid_feats_real_per_frame = {}
            fid_feats_fake_per_frame = {}
            def metric_step(engine, eval_batch):
                # One evaluation batch: predict the full sequence and collect
                # SSIM/PSNR/LPIPS values plus inception features for FID.
                net.eval()
                out_dict = {}
                with torch.no_grad():
                    # prepare data
                    weights = None
                    if test_dataset.flow_weights:
                        poke = eval_batch["poke"][0].cuda(self.all_devices[0])
                        weights = eval_batch["poke"][1].cuda(self.all_devices[0])
                    else:
                        poke = eval_batch["poke"].cuda(self.all_devices[0])
                    x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
                    x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
                    n_ref_frames = self.config["data"]["n_ref_frames"] -1 if "n_ref_frames" in self.config["data"] else self.config["data"]["max_frames"]
                    # eval forward passes
                    seq_len = x_seq_gt.shape[1]
                    # NOTE(review): results of this single-frame forward pass are never used here — confirm intentional
                    x_t_hat, sigma_t, _, alpha = net(x_t, x_t, poke, len=0)
                    x_seq_hat, _, sigmas_hat, _ = net(x_t, x_t, poke, len=seq_len, poke_linear=self.poke_scale_mode,
                                                      n_zero_frames=seq_len-n_ref_frames-1, poke_jump=self.poke_jump)
                    # optionally restrict metrics to the poked image patches
                    if weights is not None and self.config["testing"]["metrics_on_patches"]:
                        x_seq_hat = get_patches(x_seq_hat, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
                        x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
                    # apply inception model for fid calculation at time t+k
                    for t in range(x_seq_gt.shape[1]):
                        real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
                        fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
                        if t not in fid_feats_fake_per_frame:
                            fid_feats_fake_per_frame.update({t: fake_features_t})
                            fid_feats_real_per_frame.update({t: real_features_t})
                        else:
                            fid_feats_fake_per_frame[t] = np.concatenate([fid_feats_fake_per_frame[t], fake_features_t], axis=0)
                            fid_feats_real_per_frame[t] = np.concatenate([fid_feats_real_per_frame[t], real_features_t], axis=0)
                    ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                    psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                    lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat, self.lpips_fn, reduce=False, return_per_frame=True)
                    # append to arrays
                    self.psnrs["tk"].append(psnr_tk)
                    self.ssims["tk"].append(ssim_tk)
                    self.lpips["tk"].append(lpips_avg)
                    # append the values of the respective sequence length
                    [self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key: [ssim_per_frame[key]]}) for key in ssim_per_frame]
                    [self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key: [psnr_per_frame[key]]}) for key in psnr_per_frame]
                    [self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key: [lpips_per_frame[key]]}) for key in lpips_per_frame]
                return out_dict
            evaluator = Engine(metric_step)
            self.logger.info("Initialize inception model...")
            self.inception_model = FIDInceptionModel()
            self.logger.info("Finished initialization of inception model...")
            # note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
            self.lpips_fn = LPIPS(net="vgg")
            pbar = ProgressBar(ascii=True)
            pbar.attach(evaluator, output_transform=lambda x: x)
            # move inception and lpips models to gpu for the evaluation pass
            self.inception_model.eval()
            self.inception_model.cuda(self.all_devices[0])
            self.lpips_fn.cuda(self.all_devices[0])
            self.lpips_fn.eval()
            # compute metrics over an epoch
            self.logger.info(f"Start metrics computation.")
            batch_size = test_sampler.batch_size if self.custom_sampler else test_loader.batch_size
            # evaluate on at most ~8000 samples
            el = (int(8000 / batch_size) if len(test_dataset) > 8000 else len(test_loader))
            evaluator.run(test_loader, max_epochs=1, epoch_length=el)
            # [wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
            # compute metrics
            ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
            psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
            lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
            assert list(fid_feats_real_per_frame.keys()) == list(fid_feats_fake_per_frame.keys())
            fid_per_frame = {}
            for key in tqdm(fid_feats_real_per_frame, desc="Computing FID per frame"):
                fid_per_frame[key] = metric_fid(fid_feats_real_per_frame[key], fid_feats_fake_per_frame[key])
            # fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
            fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
            log_dict = {"ssim-avg-temp": ssim_tk, "psnr-avg_temp": psnr_tk, "fid-avg_temp": fid_avg, "lpips-avg-temp": lpips_avg}
            # reduce per-frame collections to scalar means for the summary plots
            self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
            self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
            self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
            savedir = path.join(self.dirs["generated"], "metric_summaries")
            makedirs(savedir, exist_ok=True)
            x = [k + 1 for k in self.lpips_per_frame]
            make_plot(x, list(self.lpips_per_frame.values()), "LPIPS of predicted frames", ylabel="Average LPIPS", savename=path.join(savedir, "lpips.svg"))
            make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM", savename=path.join(savedir, "ssim.svg"))
            make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR", savename=path.join(savedir, "psnr.svg"))
            make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID", savename=path.join(savedir, "fid.svg"))
            self.logger.info("Averaged metrics: ")
            for key in log_dict:
                self.logger.info(f'{key}: {log_dict[key]}')
        elif self.config["testing"]["mode"] == "fvd":
            batch_size = test_sampler.batch_size if self.custom_sampler else test_loader.batch_size
            # number of batches used for fvd sample generation (~1000 samples)
            el = (int(1000 / batch_size) if len(test_dataset) > 1000 else len(test_loader))
            real_samples = []
            fake_samples = []
            real_samples_out = []
            def generate_vids(engine, eval_batch):
                # One batch: render a predicted sequence and keep uint8 copies for fvd.
                net.eval()
                with torch.no_grad():
                    # prepare data
                    if test_dataset.flow_weights:
                        poke = eval_batch["poke"][0].cuda(self.all_devices[0])
                    else:
                        poke = eval_batch["poke"].cuda(self.all_devices[0])
                    if engine.state.iteration < el:
                        x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
                        x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
                        n_ref_frames = self.config["data"]["n_ref_frames"] -1 if "n_ref_frames" in self.config["data"] else self.config["data"]["max_frames"]
                        # eval forward passes
                        seq_len = x_seq_gt.shape[1]
                        x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len,poke_linear=self.poke_scale_mode,
                                            n_zero_frames=seq_len-n_ref_frames-1, poke_jump=self.poke_jump)
                        # [-1,1] float -> [0,255] uint8, channels-last for video export
                        real_batch = ((x_seq_gt + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
                        fake_batch = ((x_seq_hat + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
                        real_samples.append(real_batch)
                        fake_samples.append(fake_batch)
                    else:
                        # past the sample budget: keep only additional real sequences
                        real_batch = ((eval_batch["images"][:, 1:] + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
                        real_samples_out.append(real_batch)
            generator = Engine(generate_vids)
            pbar = ProgressBar(ascii=True)
            pbar.attach(generator, output_transform=lambda x: x)
            self.logger.info(f"Start collecting sequences for fvd computation...")
            generator.run(test_loader, max_epochs=1, epoch_length=el)
            savedir = path.join(self.dirs["generated"], "samples_fvd")
            savedir_exmpls = path.join(savedir,"vid_examples")
            makedirs(savedir, exist_ok=True)
            makedirs(savedir_exmpls, exist_ok=True)
            real_samples = np.stack(real_samples, axis=0)
            fake_samples = np.stack(fake_samples, axis=0)
            real_samples_out = np.stack(real_samples_out, axis=0)
            n_ex = 0
            self.logger.info(f"Generating example videos")
            # write up to ~20 example mp4s: ground truth on top, prediction below
            for i,(r,f) in enumerate(zip(real_samples,fake_samples)):
                savename = path.join(savedir_exmpls,f"sample{i}.mp4")
                r = np.concatenate([v for v in r],axis=2)
                f = np.concatenate([v for v in f],axis=2)
                # NOTE(review): `all` shadows the builtin within this loop
                all = np.concatenate([r,f],axis=1)
                writer = cv2.VideoWriter(
                    savename,
                    cv2.VideoWriter_fourcc(*"MP4V"),
                    5,
                    (all.shape[2], all.shape[1]),
                )
                # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
                for frame in all:
                    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                    writer.write(frame)
                writer.release()
                n_ex+=1
                if n_ex > 20:
                    break
            self.logger.info(f"Saving samples to {savedir}")
            np.save(path.join(savedir, "real_samples.npy"), real_samples)
            np.save(path.join(savedir, "fake_samples.npy"), fake_samples)
            self.logger.info(f'Finish generation of vid samples.')
# --- interactive-image2video-synthesis-main/experiments/__init__.py ---
from experiments.experiment import Experiment
from experiments.sequence_model import SequencePokeModel
from experiments.fixed_length_model import FixedLengthModel
__experiments__ = {
"sequence_poke_model": SequencePokeModel,
"fixed_length_model": FixedLengthModel,
}
def select_experiment(config,dirs, device):
    """Look up and instantiate the experiment class named in the config.

    Raises NotImplementedError when the configured experiment identifier is
    not registered in __experiments__.
    """
    experiment = config["general"]["experiment"]
    project_name = config["general"]["project_name"]
    if experiment not in __experiments__:
        raise NotImplementedError(f"No such experiment! {experiment}")
    verb = "Restarting" if config["general"]["restart"] else "Running new"
    print(f"{verb} experiment \"{project_name}\" of type \"{experiment}\". Device: {device}")
    return __experiments__[experiment](config, dirs, device)
| 851 | 37.727273 | 102 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/experiments/sequence_model.py | import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import Average, MetricUsage
import numpy as np
import wandb
from functools import partial
from lpips import LPIPS
from tqdm import tqdm
from experiments.experiment import Experiment
from data import get_dataset
from data.samplers import SequenceLengthSampler
from models.latent_flow_net import ResidualSequenceBaseline,SkipSequenceModel
from models.discriminator import GANTrainer
from utils.losses import PerceptualVGG,vgg_loss_agg,DynamicsLoss, pixel_triplet_loss, style_loss, PixelDynamicsLoss, kl_loss
from utils.testing import make_flow_grid, make_img_grid, make_video, make_hist, make_plot
from utils.metrics import metric_fid, FIDInceptionModel, metric_lpips, psnr_lightning, ssim_lightning
from utils.general import linear_var, get_member, get_patches
class SequencePokeModel(Experiment):
    def __init__(self, config, dirs, device):
        """Set up datakeys and metric/GAN bookkeeping for the sequence-poke experiment.

        Args:
            config: experiment configuration dict (architecture/training/gan/... sections).
            dirs: directory mapping handed through to the base Experiment.
            device: target device handed through to the base Experiment.
        """
        super().__init__(config, dirs, device)
        # keys requested from the dataset for every batch
        self.datakeys = ["images","poke"]
        if self.config["architecture"]["disentanglement"]:
            self.datakeys.append("img_aT")
            self.datakeys.append("app_img_random")
        if self.config["gan_temp"]["conditional"]:
            self.datakeys.append("flow")
        # used for efficient metrics computation
        self.fid_feats_fake_per_frame = {}
        self.fid_feats_real_per_frame = {}
        # per-batch metric collections, keyed by split ("t": single frame, "tk": sequence, "pl")
        self.psnrs = {"t": [], "tk": [], "pl" : []}
        self.ssims = {"t": [], "tk": [], "pl" : []}
        self.lpips = {"t": [], "tk": []}
        self.fvd_vids_real = []
        self.fvd_vids_fake = []
        # discriminator switches (per-frame patch GAN and temporal GAN)
        self.use_gan = self.config["gan"]["use"]
        self.use_temp_disc = self.config["gan_temp"]["use"]
        #self.pixel_decoder_loss = self.config["training"]["pixel_dynamics_weight"] > 0
        self.lr_dec_t = 0
        self.target_dev = None
        # metrics for each frame
        self.ssims_per_frame = {}
        self.lpips_per_frame = {}
        self.psnrs_per_frame = {}
        # self.ssims_per_frame_pl = {}
        # self.psnrs_per_frame_pl = {}
        # latest averaged LPIPS; used as the checkpoint score during training
        self.lpips_avg = None
        # optional poke-norm regularizer; incompatible with a variational dynamics head
        self.use_norm_loss = self.config["training"]["norm_loss_weight"] > 0 if "norm_loss_weight" in self.config["training"] else False
        if self.use_norm_loss:
            assert not self.config["architecture"]["dynamics_var"]
def __clear_metric_arrs(self):
[self.psnrs[key].clear() for key in self.psnrs]
[self.ssims[key].clear() for key in self.ssims]
[self.lpips[key].clear() for key in self.lpips]
self.fvd_vids_real.clear()
self.fvd_vids_fake.clear()
self.lpips_per_frame = {}
self.psnrs_per_frame = {}
self.ssims_per_frame = {}
self.fid_feats_fake_per_frame = {}
self.fid_feats_real_per_frame = {}
# self.ssims_per_frame_pl = {}
# self.psnrs_per_frame_pl = {}
def train(self):
########## checkpoints ##########
if self.config["general"]["restart"] and not self.is_debug:
mod_ckpt, op_ckpts = self._load_ckpt("reg_ckpt", single_opt=not self.config["training"]["two_stage"])
if self.config["training"]["two_stage"]:
op_ckpt_dis = op_ckpts["optimizer_dis"]
op_ckpt_dyn = op_ckpts["optimizer_dyn"]
else:
op_ckpt_dyn = op_ckpts
else:
mod_ckpt = op_ckpt_dis = op_ckpt_dyn = None
# get datasets for training and testing
def w_init_fn(worker_id):
return np.random.seed(np.random.get_state()[1][0] + worker_id)
dataset, transforms = get_dataset(config=self.config["data"])
train_dataset = dataset(transforms, self.datakeys, self.config["data"], train=True)
test_datakeys = self.datakeys + ["app_img_random"] if self.config["testing"]["eval_app_transfer"] and "app_img_random" not in self.datakeys else self.datakeys
test_datakeys.append("flow")
test_dataset = dataset(transforms, test_datakeys, self.config["data"], train=False)
train_sampler = SequenceLengthSampler(train_dataset,
batch_size=self.config["training"]["batch_size"],
shuffle=True,
drop_last=True,
zero_poke=self.config["data"]["include_zeropoke"])
train_loader = DataLoader(
train_dataset,
batch_sampler=train_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn
)
test_sampler = SequenceLengthSampler(test_dataset,
batch_size=self.config["training"]["batch_size"],
shuffle=True,
drop_last=True,
zero_poke=self.config["data"]["include_zeropoke"])
test_loader = DataLoader(
test_dataset,
batch_sampler=test_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn
)
# n_eval_frames = int(test_dataset.max_frames/2)
# if int(test_dataset.min_frames + n_eval_frames) % 2 != 0:
# n_eval_frames+=1
# no zeropoke for evaluation as zeropoke is only to ensure no reaction when poking outside
eval_sampler = SequenceLengthSampler(test_dataset,
batch_size=self.config["testing"]["test_batch_size"],
shuffle=False,
drop_last=True,
zero_poke = False
)
eval_loader = DataLoader(test_dataset,
batch_sampler=eval_sampler,
num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
worker_init_fn=w_init_fn )
# define model
self.logger.info(f"Load model...")
net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else ResidualSequenceBaseline
net = net_model(spatial_size=self.config["data"]["spatial_size"],
config=self.config["architecture"],)
self.logger.info(
f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
)
if self.config["general"]["restart"] and mod_ckpt is not None:
self.logger.info("Load pretrained paramaters and resume training.")
net.load_state_dict(mod_ckpt)
if self.parallel:
net = torch.nn.DataParallel(net, device_ids=self.all_devices)
net.cuda(self.all_devices[0])
self.logger.info("Model on gpu!")
# log weights and gradients
wandb.watch(net, log="all")
# define optimizers
# appearance and shape disentanglement
if self.config["training"]["two_stage"]:
dis_params = [{"params": get_member(net,"shape_enc").parameters(), "name": "shape_encoder"},
{"params": get_member(net,"dec").parameters(), "name": "decoder"}
]
if self.config["architecture"]["disentanglement"]:
dis_params.append({"params": net.appearance_enc.parameters(), "name": "appearance_encoder", },)
optimizer_dis = Adam(dis_params, lr=self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dis is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dis.load_state_dict(op_ckpt_dis)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dis = torch.optim.lr_scheduler.MultiStepLR(optimizer_dis, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# dynamics
dyn_params = [{"params": get_member(net,"dynamics_enc").parameters(), "name": "dynamics_encoder", },
{"params": get_member(net,"fusion_block").parameters(), "name": "fusion_block",},]
if self.config["training"]["decoder_update_tk"] or not self.config["training"]["two_stage"]:
dyn_params.append({"params": get_member(net,"dec").parameters(), "name": "decoder"})
if not self.config["training"]["two_stage"]:
dyn_params.append({"params": get_member(net,"shape_enc").parameters(), "name": "shape_encoder"})
optimizer_dyn = Adam(dyn_params, lr = self.config["training"]["lr"])
if self.config["general"]["restart"] and op_ckpt_dyn is not None:
self.logger.info("Load state_dict of optimizer.")
optimizer_dyn.load_state_dict(op_ckpt_dyn)
milestones = [int(self.config["training"]["n_epochs"] * t) for t in self.config["training"]["tau"]]
scheduler_dyn = torch.optim.lr_scheduler.MultiStepLR(optimizer_dyn, milestones=milestones, gamma=self.config["training"]["lr_reduce"])
# initialize disc if gan mode is enabled
if self.use_gan:
gan_trainer = GANTrainer(self.config, self._load_ckpt, self.logger,spatial_size=self.config["data"]["spatial_size"][0] ,
parallel=self.parallel, devices=self.all_devices, debug=self.is_debug)
if self.use_temp_disc:
gan_trainer_temp = GANTrainer(self.config, self._load_ckpt,self.logger,spatial_size=self.config["data"]["spatial_size"][0],
parallel=self.parallel,devices=self.all_devices, debug=self.is_debug,temporal=True, sequence_length=train_dataset.min_frames)
# set start iteration and epoch in case model training is resumed
start_it = 0
start_epoch = 0
n_epoch_train = self.config["training"]["n_epochs"]
n_epoch_overall = self.config["training"]["n_epochs"]
if self.config["general"]["restart"] and op_ckpts is not None:
start_it = list(optimizer_dyn.state_dict()["state"].values())[-1]["step"]
start_epoch = int(np.floor(start_it / len(train_loader)))
assert self.config["training"]["n_epochs"] > start_epoch
n_epoch_train = self.config["training"]["n_epochs"] - start_epoch
#
lr_dec_rec = partial(linear_var,start_it=0,
end_it=self.config["training"]["lr_dec_end_it"],
start_val=self.config["training"]["lr"],
end_val=self.config["training"]["lr_dec_end_val"],
clip_min=0,
clip_max=self.config["training"]["lr"],)
self.lr_dec_t = lr_dec_rec(start_it)
# losses
self.logger.info("Load VGG")
self.vgg = PerceptualVGG()
if self.parallel:
self.vgg = torch.nn.DataParallel(self.vgg,device_ids=self.all_devices)
self.vgg.cuda(self.all_devices[0])
self.logger.info("VGG on gpu")
# from torchsummary import summary
# summary(vgg.vgg,(3,224,224))
self.logger.info("Initialize persistent losses")
latent_dynamics_loss = DynamicsLoss(config=self.config["training"])
pixel_dynamics_loss = partial(pixel_triplet_loss,vgg = self.vgg, diff_pp=self.config["training"]["pixel_dyn_spatial"]) if self.config["training"]["pixel_dynamics_vgg"] else PixelDynamicsLoss()
self.logger.info("Finished initializing persistent losses.")
def train_step(engine,batch):
net.train()
# prepare data
weights=None
loss_dis = 0
out_dict = {}
if train_dataset.flow_weights:
poke = batch["poke"][0].cuda(self.all_devices[0])
weights = batch["poke"][1].cuda(self.all_devices[0])
else:
poke = batch["poke"].cuda(self.all_devices[0])
x_t = batch["images"][:, 0].cuda(self.all_devices[0])
x_seq = batch["images"][:, 1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
shape_img = batch["img_aT"].cuda(self.all_devices[0])
# apply style loss
app_img_tr = batch["app_img_random"].cuda(self.all_devices[0])
x_trans, *_ = net(app_img_tr,x_t,poke,len=0)
loss_style = style_loss(self.vgg,app_img_tr,x_trans)
loss_dis = self.config["training"]["style_loss_weight"] * loss_style
out_dict.update({"style_loss": loss_style.item()})
else:
shape_img = x_t
x_t_hat_i, sigma_t, _, alpha = net(x_seq[:, -1], shape_img, poke, len=0)
# disentanglement loss
loss_dis = loss_dis + vgg_loss_agg(self.vgg, x_t, x_t_hat_i)
if engine.state.epoch <= self.config["training"]["stop_seq_stat"] and self.config["training"]["two_stage"]:
#optimize parameter of appearance, shape encoders and decoder
optimizer_dis.zero_grad()
loss_dis.backward()
optimizer_dis.step()
out_dict.update({"loss_dis" : loss_dis.item()})
# forward pass for training of dynamics part of the model
# dynamics losses
seq_len = x_seq.shape[1]
seq_rec, mu_delta, sigmas_hat, logstd_delta = net(x_t,shape_img,poke,len=seq_len)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
if weights is not None:
seq_rec = get_patches(seq_rec,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
x_seq = get_patches(x_seq,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
for n in range(seq_len):
x_hat_tn,s_tn,*_ = net(x_seq[:,n],x_seq[:,n],poke,len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
ll_dyn_n = vgg_loss_agg(self.vgg,x_seq[:,n],seq_rec[:,n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_dyn = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
if weights is not None:
rec_imgs = get_patches(rec_imgs,weights,self.config["data"],train_dataset.weight_value_flow, logger=self.logger)
#latent dynamics
dyn_losses = []
for s_tk,s_hat_tk in zip(sigmas_gt,sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk,s_tk,[]))
latent_loss_dyn = torch.stack(dyn_losses).mean()
loss_dyn = self.config["training"]["vgg_dyn_weight"] * ll_loss_dyn + self.config["training"]["latent_dynamics_weight"] * latent_loss_dyn
if self.use_norm_loss:
poke_norms = []
for p in poke:
magns = p.norm(dim=0)
ids = magns.nonzero(as_tuple=True)
if ids[0].shape[0] > 0:
poke_norms.append(magns[ids].mean().unsqueeze(0))
else:
poke_norms.append(torch.zeros(1).cuda(self.all_devices[0]))
poke_norms = torch.cat(poke_norms, 0)
norm_loss = ((poke_norms - mu_delta.reshape(poke_norms.shape[0], -1).norm(dim=-1)) ** 2).mean()
loss_dyn = loss_dyn + self.config["training"]["norm_loss_weight"] * norm_loss
out_dict.update({"norm_loss": norm_loss.item()})
# kl loss for
if self.config["architecture"]["dynamics_var"]:
kl_dyn = kl_loss(mu_delta,logstd_delta)
loss_dyn = self.config["training"]["kl_weight"] * kl_dyn
out_dict.update({"kl_dyn": kl_dyn.item()})
# pixel dynamics
loss_dec_dyn = []
for n in range(seq_len-1):
loss_dec_dyn_tn = pixel_dynamics_loss(x_seq[:,n],x_seq[:,n+1],rec_imgs[:,n],seq_rec[:,n+1])
loss_dec_dyn.append(loss_dec_dyn_tn)
loss_dec_dyn = torch.stack(loss_dec_dyn,dim=0).mean()
loss_dyn = loss_dyn + self.config["training"]["pixel_dynamics_weight"] * loss_dec_dyn
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
if self.config["gan"]["pixel_dynamics"]:
offsets = np.random.choice(np.arange(max(1,x_seq.shape[1]-train_dataset.min_frames)),size=x_seq.shape[0])
true_exmpls = torch.stack([seq[o:o+train_dataset.min_frames] for seq, o in zip(x_seq,offsets)],dim=0)
fake_exmpls = torch.stack([seq[o:o+train_dataset.min_frames] for seq, o in zip(seq_rec, offsets)], dim=0)
x_true = torch.cat([true_exmpls[:,1:],true_exmpls[:,:-1]],dim=2).reshape(-1,2*true_exmpls.shape[2],*true_exmpls.shape[3:])
x_fake = torch.cat([fake_exmpls[:, 1:], true_exmpls[:, :-1]], dim=2).reshape(-1, 2 * fake_exmpls.shape[2], *fake_exmpls.shape[3:])
else:
true_exmpls = np.random.choice(np.arange(x_seq.shape[0]*x_seq.shape[1]),self.config["gan"]["n_examples"])
fake_exmpls = np.random.choice(np.arange(seq_rec.shape[0]*seq_rec.shape[1]), self.config["gan"]["n_examples"])
x_true = x_seq.view(-1,*x_seq.shape[2:])[true_exmpls]
x_fake = seq_rec.view(-1,*seq_rec.shape[2:])[fake_exmpls]
disc_dict, loss_gen, loss_fmap = gan_trainer.train_step(x_true, x_fake)
loss_dyn = loss_dyn + self.config["gan"]["gen_weight"] * loss_gen + self.config["gan"]["fmap_weight"] * loss_fmap
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
seq_len_act = x_seq.shape[1]
offset = int(np.random.choice(np.arange(max(1,seq_len_act-train_dataset.min_frames)),1))
# offset_fake = int(np.random.choice(np.arange(max(1,seq_len_act-seq_len_temp_disc)), 1))
x_fake_tmp = seq_rec[:,offset:offset+train_dataset.min_frames].permute(0,2,1,3,4)
x_true_tmp = x_seq[:, offset:offset+train_dataset.min_frames].permute(0,2,1,3,4)
if self.config["gan_temp"]["conditional"]:
flow = batch["flow"].cuda(self.all_devices[0])
cond = get_patches(flow,weights,self.config["data"],test_dataset.weight_value_flow,self.logger) if test_dataset.flow_weights else flow
else:
cond = None
disc_dict_temp, loss_gen_temp, loss_fmap_temp = gan_trainer_temp.train_step(x_true_tmp,x_fake_tmp,cond)
loss_dyn = loss_dyn + self.config["gan_temp"]["gen_weight"] * loss_gen_temp + self.config["gan_temp"]["fmap_weight"] * loss_fmap_temp
# optimize parameters of dynamics part
optimizer_dyn.zero_grad()
loss_dyn.backward()
optimizer_dyn.step()
out_dict.update({"loss_dyn":loss_dyn.item() ,"vgg_loss_dyn" : ll_loss_dyn.item(), "latent_loss_dyn": latent_loss_dyn.item(), "lr_dec_t": self.lr_dec_t})
if self.use_gan and engine.state.iteration >= self.config["gan"]["start_iteration"]:
out_dict.update(disc_dict)
out_dict.update({"loss_gen_patch" :loss_gen.item(), "loss_fmap_patch": loss_fmap.item()})
if self.use_temp_disc and engine.state.iteration >= self.config["gan_temp"]["start_iteration"]:
out_dict.update(disc_dict_temp)
out_dict.update({"loss_gen_temp" :loss_gen_temp.item(), "loss_fmap_temp": loss_fmap_temp.item()})
#if self.pixel_decoder_loss:
out_dict.update({"pixel_loss_dec": loss_dec_dyn.item()})
return out_dict
self.logger.info("Initialize inception model...")
self.inception_model = FIDInceptionModel()
self.logger.info("Finished initialization of inception model...")
# note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
self.lpips_fn = LPIPS(net="vgg")
        def eval_step(engine, eval_batch):
            """Single evaluation step driven by the ignite evaluator engine.

            Computes reconstruction/latent/pixel losses on one held-out batch,
            accumulates per-frame inception features for FID, and appends
            SSIM/PSNR/LPIPS values to the trainer's collection arrays
            (self.ssims / self.psnrs / self.lpips and their *_per_frame dicts).

            Args:
                engine: the ignite Engine running evaluation (unused here).
                eval_batch: batch dict from the eval loader with keys "poke"
                    and "images" (plus "app_img_random" when disentanglement
                    is enabled).

            Returns:
                Dict of scalar loss values attached to engine.state.output.
            """
            net.eval()
            out_dict = {}
            with torch.no_grad():
                # prepare data
                weights = None
                if test_dataset.flow_weights:
                    # "poke" is a (poke, weights) pair when flow weights are used
                    poke = eval_batch["poke"][0].cuda(self.all_devices[0])
                    weights = eval_batch["poke"][1].cuda(self.all_devices[0])
                else:
                    poke = eval_batch["poke"].cuda(self.all_devices[0])
                # frame 0 is the source image; the rest is the target sequence
                x_t = eval_batch["images"][:,0].cuda(self.all_devices[0])
                x_seq_gt = eval_batch["images"][:,1:].cuda(self.all_devices[0])
                if self.config["architecture"]["disentanglement"]:
                    # appearance-transfer branch: decode with an unrelated appearance image
                    app_img_tr = eval_batch["app_img_random"].cuda(self.all_devices[0])
                    x_trans, *_ = net(app_img_tr, x_t, poke,len=0)
                    loss_style = style_loss(self.vgg, app_img_tr, x_trans)
                    out_dict.update({"style_loss_eval": loss_style.item()})
                # eval forward passes
                # len=0 -> single-image reconstruction; len=seq_len -> sequence
                # prediction (net presumably returns (imgs, sigmas, mu, alpha) —
                # inferred from the unpacking; confirm against the model class)
                seq_len = x_seq_gt.shape[1]
                x_t_hat, sigma_t, _, alpha = net(x_t,x_t,poke,len=0)
                x_seq_hat, mu_delta, sigmas_hat,_ = net(x_t, x_t, poke,len=seq_len)
                if weights is not None and self.config["testing"]["metrics_on_patches"]:
                    # restrict metric computation to the patches around the poke location
                    x_seq_hat = get_patches(x_seq_hat,weights,self.config["data"],test_dataset.weight_value_flow, logger=self.logger)
                    x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
                sigmas_gt = []
                ll_loss_dyn = []
                rec_imgs = []
                # per-frame: encode ground-truth frames and compare predictions via VGG loss
                for n in range(seq_len):
                    x_hat_tn, s_tn, *_ = net(x_seq_gt[:, n], x_seq_gt[:, n], poke, len=0)
                    sigmas_gt.append(s_tn)
                    rec_imgs.append(x_hat_tn)
                    ll_dyn_n = vgg_loss_agg(self.vgg, x_seq_gt[:, n], x_seq_hat[:, n])
                    ll_loss_dyn.append(ll_dyn_n)
                ll_loss_tk_eval = torch.stack(ll_loss_dyn,dim=0).mean()
                rec_imgs = torch.stack(rec_imgs,1)
                if self.use_norm_loss:
                    # mean flow magnitude over nonzero poke pixels, per example
                    poke_norms = []
                    for p in poke:
                        magns = p.norm(dim=0)
                        ids = magns.nonzero(as_tuple=True)
                        if ids[0].shape[0] > 0:
                            poke_norms.append(magns[ids].mean().unsqueeze(0))
                        else:
                            poke_norms.append(torch.zeros(1).cuda(self.all_devices[0]))
                    poke_norms = torch.cat(poke_norms, 0)
                    norm_loss = ((poke_norms - mu_delta.reshape(poke_norms.shape[0], -1).norm(dim=-1)) ** 2).mean()
                    out_dict.update({"norm_loss": norm_loss.item()})
                if weights is not None and self.config["testing"]["metrics_on_patches"]:
                    rec_imgs = get_patches(rec_imgs, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
                # apply inception model for fid calculation at time t+k
                for t in range(x_seq_gt.shape[1]):
                    real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
                    fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
                    if t not in self.fid_feats_fake_per_frame:
                        self.fid_feats_fake_per_frame.update({t: fake_features_t})
                        self.fid_feats_real_per_frame.update({t: real_features_t})
                    else:
                        self.fid_feats_fake_per_frame[t] = np.concatenate([self.fid_feats_fake_per_frame[t], fake_features_t], axis=0)
                        self.fid_feats_real_per_frame[t] = np.concatenate([self.fid_feats_real_per_frame[t], real_features_t], axis=0)
                # evaluate training losses
                # ll_loss_tk_eval = vgg_loss_agg(self.vgg, x_tk, x_tk_hat)
                ll_loss_t_i_eval = vgg_loss_agg(self.vgg, x_t, x_t_hat)
                dyn_losses = []
                for s_tk, s_hat_tk in zip(sigmas_gt, sigmas_hat):
                    dyn_losses.append(latent_dynamics_loss(s_hat_tk, s_tk, []))
                latent_loss_dyn_eval = torch.stack(dyn_losses).mean()
                out_dict.update({"vgg_loss_dyn_eval": ll_loss_tk_eval.item(), "loss_dis_i_eval": ll_loss_t_i_eval.item(), "latent_loss_dyn_eval": latent_loss_dyn_eval.item()})
                #if self.pixel_decoder_loss:
                #x_t_hat_dec = net.dec(sigma_t, alpha)
                #loss_dec_dyn = (vgg_loss_agg(self.vgg, x_t_hat_dec, x_tk_hat) - vgg_loss_agg(self.vgg, x_t, x_tk)) ** 2
                loss_dec_dyn = []
                for n in range(seq_len - 1):
                    loss_dec_dyn_tn = pixel_dynamics_loss(x_seq_gt[:, n], x_seq_gt[:, n + 1], rec_imgs[:,n], x_seq_hat[:, n + 1])
                    loss_dec_dyn.append(loss_dec_dyn_tn)
                loss_dec_dyn = torch.stack(loss_dec_dyn, dim=0).mean()
                out_dict.update({"pixel_loss_dec_eval": loss_dec_dyn.item()})
                # compute metrics
                ssim_t = ssim_lightning(x_t, x_t_hat)
                psnr_t = psnr_lightning(x_t, x_t_hat)
                lpips_t = metric_lpips(x_t,x_t_hat, self.lpips_fn, reduce=False)
                ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat,self.lpips_fn,reduce=False,return_per_frame=True)
                # ssim_pl, ssim_pl_per_frame = ssim_lightning(x_seq_gt,x_seq_hat,return_per_frame=True)
                # psnr_pl, psnr_pl_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                # append to arrays
                self.lpips["t"].append(lpips_t)
                self.psnrs["t"].append(psnr_t)
                self.ssims["t"].append(ssim_t)
                self.psnrs["tk"].append(psnr_tk)
                self.ssims["tk"].append(ssim_tk)
                self.lpips["tk"].append(lpips_avg)
                #self.ssims["pl"].append(ssim_pl)
                #self.psnrs["pl"].append(psnr_pl)
                # append the values of the respective sequence length
                [self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key:[ssim_per_frame[key]]}) for key in ssim_per_frame]
                [self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key:[psnr_per_frame[key]]}) for key in psnr_per_frame]
                [self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key:[lpips_per_frame[key]]}) for key in lpips_per_frame]
                #[self.ssims_per_frame_pl[key].append(ssim_pl_per_frame[key]) if key in self.ssims_per_frame_pl else self.ssims_per_frame_pl.update({key: [ssim_pl_per_frame[key]]}) for key in ssim_pl_per_frame]
                #[self.psnrs_per_frame_pl[key].append(psnr_pl_per_frame[key]) if key in self.psnrs_per_frame_pl else self.psnrs_per_frame_pl.update({key: [psnr_pl_per_frame[key]]}) for key in psnr_pl_per_frame]
            return out_dict
# test_it steps are performed while generating test_imgs, there n_test_img is overall number divided by number of test iterations
n_test_img = int(self.config["testing"]["n_test_img"] // self.config["testing"]["test_it"])
        def eval_visual(engine, eval_batch):
            """Render qualitative grids/videos on a test batch and log them to wandb.

            Produces a last-frame comparison grid, a disentanglement grid, an
            animated video grid, and (optionally) an appearance-transfer grid.
            Returns None; the side effect is the wandb.log call.
            """
            net.eval()
            with torch.no_grad():
                # prepare data
                if test_dataset.flow_weights:
                    poke = eval_batch["poke"][0].cuda(self.all_devices[0])
                    weights = eval_batch["poke"][1]
                else:
                    poke = eval_batch["poke"].cuda(self.all_devices[0])
                x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
                x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
                flow = eval_batch["flow"]
                if self.config["architecture"]["disentanglement"]:
                    # use an appearance-transformed image as shape input
                    shape_img = eval_batch["img_aT"].cuda(self.all_devices[0])
                else:
                    shape_img = x_t
                seq_len = x_seq_gt.shape[1]
                x_seq_hat, *_ = net(x_t,x_t, poke, len=seq_len)
                x_t_hat , *_ = net(x_seq_gt[:,-1],shape_img,poke,len=0)
                grid_dis = make_img_grid(x_seq_gt[:,-1],shape_img, x_t_hat,x_t, n_logged=n_test_img)
                grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img, flow=flow)
                # prepend the start frame so predicted and gt videos align
                seq_vis_hat = torch.cat([x_t.unsqueeze(1), x_seq_hat], 1)
                seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
                grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img,flow=flow)
                it = engine.state.iteration
                log_dict = {"Last Frame Comparison Test data": wandb.Image(grid_dyn, caption=f"Last frames test grid #{it}."),
                            "Disentanglement Grid Test Data": wandb.Image(grid_dis, caption=f"Test grid disentanglement #{it}."),
                            "Video Grid Test Data": wandb.Video(grid_anim,caption=f"Test Video Grid #{it}.",fps=5)}
                if self.config["testing"]["eval_app_transfer"]:
                    app_img_unrelated = eval_batch["app_img_random"].cuda(self.all_devices[0])
                    x_transferred, *_ = net(app_img_unrelated,x_t, poke,len=0)
                    transfer_grid = make_img_grid(app_img_unrelated,x_t,x_transferred)
                    log_dict.update({"Appearance transfer grid Test Data": wandb.Image(transfer_grid, caption=f"Test_grid appearance transfer #{it}")})
                wandb.log(log_dict)
                return None
self.logger.info("Initialize engines...")
trainer = Engine(train_step)
evaluator = Engine(eval_step)
test_img_generator = Engine(eval_visual)
self.logger.info("Finish engine initialization...")
# checkpointing
ckpt_handler = ModelCheckpoint(self.dirs["ckpt"], "reg_ckpt", n_saved=10, require_empty=False)
if self.config["training"]["two_stage"]:
save_dict = {"model": net, "optimizer_dis": optimizer_dis, "optimizer_dyn": optimizer_dyn}
else:
save_dict = {"model": net, "optimizer_dyn": optimizer_dyn}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler,
save_dict)
if self.use_gan:
ckpt_handler_disc = ModelCheckpoint(self.dirs["ckpt"], gan_trainer.load_key, n_saved=10, require_empty=False)
save_dict_disc = {"model": gan_trainer.disc, "optimizer": gan_trainer.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc,
save_dict_disc)
if self.use_temp_disc:
ckpt_handler_disc_temp = ModelCheckpoint(self.dirs["ckpt"], gan_trainer_temp.load_key, n_saved=10, require_empty=False)
save_dict_disc_temp = {"model": gan_trainer_temp.disc, "optimizer": gan_trainer_temp.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc_temp,
save_dict_disc_temp)
pbar = ProgressBar(ascii=True)
pbar.attach(trainer, output_transform=lambda x: x)
pbar.attach(evaluator, output_transform=lambda x: x)
#reduce the learning rate of the decoder for the image reconstruction task, such that the model focusses more on t --> tk
if self.config["training"]["two_stage"]:
@trainer.on(Events.ITERATION_COMPLETED)
            def update_lr(engine):
                """Anneal the image-reconstruction decoder LR each iteration.

                Shifts optimization focus from reconstruction to dynamics by
                shrinking only the "decoder" parameter group's learning rate.
                """
                self.lr_dec_t = lr_dec_rec(engine.state.iteration)
                for g in optimizer_dis.param_groups:
                    if g["name"] == "decoder":
                        g["lr"] = self.lr_dec_t
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["log_intervall"]))
        def log(engine):
            """Periodic wandb logging of scalar losses and qualitative train grids.

            Re-runs the model on the current train batch (no_grad) purely for
            visualization; does not affect optimization.
            """
            it = engine.state.iteration
            wandb.log({"iteration": it})
            # log losses
            for key in engine.state.output:
                wandb.log({key: engine.state.output[key]})
            data = engine.state.batch
            if test_dataset.flow_weights:
                poke = data["poke"][0].cuda(self.all_devices[0])
            else:
                poke = data["poke"].cuda(self.all_devices[0])
            x_t = data["images"][:, 0].cuda(self.all_devices[0])
            x_seq_gt = data["images"][:, 1:].cuda(self.all_devices[0])
            if self.config["architecture"]["disentanglement"]:
                shape_img = data["img_aT"].cuda(self.all_devices[0])
            else:
                shape_img = x_t
            net.eval()
            seq_len = x_seq_gt.shape[1]
            with torch.no_grad():
                x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len)
                x_t_hat, *_ = net(x_seq_gt[:,-1], shape_img, poke,len=0)
                #x_t_hat_e, *_ = net(img_aT, img_sT, poke)
                grid_dis_i = make_img_grid(x_seq_gt[:,-1], shape_img, x_t_hat, x_t, n_logged=n_test_img)
                grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img)
                seq_vis_hat = torch.cat([x_t.unsqueeze(1),x_seq_hat],1)
                seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
                grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img)
                wandb.log({"Last Frame Comparison Train Data": wandb.Image(grid_dyn, caption=f"Last frames train grid after {it} train steps."),
                           "Disentanglement Grid Invariance Train Data": wandb.Image(grid_dis_i, caption=f"Invariance Disentanglement Grid on train set after {it} train steps."),
                           "Video Grid Train Data": wandb.Video(grid_anim, caption=f"Train Video Grid after {it} train steps",fps=5)})
                #"Disentanglement Grid Equivariance Train Data": wandb.Image(grid_dis_e, caption=f"Eqiuvariance Disentanglement Grid on train set after {it} train steps.")
self.logger.info("Initialize metrics...")
# compute loss average over epochs
# Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["vgg_loss_dyn"]).attach(trainer, "vgg_loss_dyn-epoch_avg")
Average(output_transform=lambda x: x["latent_loss_dyn"]).attach(trainer, "latent_loss_dyn-epoch_avg")
if self.config["architecture"]["disentanglement"]:
Average(output_transform=lambda x: x["style_loss"]).attach(trainer, "style_loss-epoch_avg")
Average(output_transform=lambda x: x["style_loss_eval"]).attach(evaluator, "style_loss_eval")
if self.use_norm_loss:
Average(output_transform=lambda x: x["norm_loss"]).attach(trainer, "norm_loss-epoch_avg")
Average(output_transform=lambda x: x["norm_loss"]).attach(evaluator, "norm_loss_eval")
if self.config["architecture"]["dynamics_var"]:
Average(output_transform=lambda x: x["kl_dyn"]).attach(trainer, "kl_dyn_loss-epoch_avg")
if self.use_temp_disc or self.use_gan:
            def gan_training_started(engine,epoch, key="gan"):
                """Ignite event filter: True once the GAN identified by *key*
                ("gan" or "gan_temp") has reached its start_iteration."""
                return engine.state.iteration >= self.config[key]["start_iteration"]
if self.use_gan:
use_patchgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=gan_training_started),
completed=Events.EPOCH_COMPLETED(event_filter=gan_training_started),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=gan_training_started))
# gan losses
Average(output_transform=lambda x: x["loss_gen_patch"]).attach(trainer, "loss_gen_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_patch"]).attach(trainer, "loss_fmap_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_disc_patch"]).attach(trainer, "loss_disc_patch-epoch_avg",usage=use_patchgan_metrics)
                    #if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_patch"]).attach(trainer, "loss_gp_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_""true_patch"]).attach(trainer, "p_true_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_fake_patch"]).attach(trainer, "p_fake_patch-epoch_avg",usage=use_patchgan_metrics)
                @trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
                def gan_stuff(engine):
                    # step the patch discriminator's LR scheduler once per epoch,
                    # but only after patch-GAN training has started
                    gan_trainer.disc_scheduler.step()
if self.use_temp_disc:
use_tmpgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=partial(gan_training_started,key="gan_temp")),
completed=Events.EPOCH_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")))
# gan losses
Average(output_transform=lambda x: x["loss_gen_temp"]).attach(trainer, "loss_gen_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_temp"]).attach(trainer, "loss_fmap_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_disc_temp"]).attach(trainer, "loss_disc_temp-epoch_avg",usage=use_tmpgan_metrics)
#if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_temp"]).attach(trainer, "loss_gp_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_true_temp"]).attach(trainer, "p_true_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_fake_temp"]).attach(trainer, "p_fake_temp-epoch_avg",usage=use_tmpgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def temp_disc_stuff(engine):
gan_trainer_temp.disc_scheduler.step()
# if self.pixel_decoder_loss:
Average(output_transform=lambda x: x["pixel_loss_dec"]).attach(trainer, "pixel_loss_dec-epoch_avg")
Average(output_transform=lambda x: x["pixel_loss_dec_eval"]).attach(evaluator, "pixel_loss_dec_eval")
# evaluation losses
Average(output_transform=lambda x: x["vgg_loss_dyn_eval"]).attach(evaluator, "vgg_loss_dyn_eval")
Average(output_transform=lambda x: x["loss_dis_i_eval"]).attach(evaluator, "loss_dis_i_eval")
Average(output_transform=lambda x: x["latent_loss_dyn_eval"]).attach(evaluator, "latent_loss_dyn_eval")
self.logger.info("Finish metric initialization.")
@trainer.on(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]))
        def metrics(engine):
            """Run the evaluator over the eval loader and log aggregate metrics.

            Moves inception/LPIPS models to GPU (and discriminators to CPU to
            free memory), runs eval_step over a bounded number of batches,
            aggregates SSIM/PSNR/LPIPS/FID, logs plots + scalars to wandb, and
            finally restores device placement and clears the metric buffers.
            """
            # move the inception and LPIPS models to the GPU for metric computation
            self.inception_model.eval()
            self.inception_model.cuda(self.all_devices[0])
            self.lpips_fn.cuda(self.all_devices[0])
            self.lpips_fn.eval()
            # free GPU memory by parking the discriminators on the CPU meanwhile
            if self.use_temp_disc:
                gan_trainer_temp.disc.cpu()
            if self.use_gan:
                gan_trainer.disc.cpu()
            # compute metrics over an epoch
            self.logger.info(f"Computing metrics after epoch #{engine.state.epoch}")
            # cap evaluation at ~8000 samples (20 batches when debugging)
            bs = 20 if self.is_debug else (int(8000 / eval_sampler.batch_size) if len(test_dataset) > 8000 else len(eval_loader))
            evaluator.run(eval_loader, max_epochs=1, epoch_length=bs)
            [wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
            # compute metrics
            ssim_t = np.mean(np.stack(self.ssims["t"], axis=0))
            psnr_t = np.mean(np.stack(self.psnrs["t"], axis=0))
            lpips_t = np.mean(np.concatenate(self.lpips["t"], axis=0))
            ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
            psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
            lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
            # stored for the best-checkpoint score function
            self.lpips_avg = lpips_avg
            fid_per_frame = {}
            for key in tqdm(self.fid_feats_real_per_frame, desc="Computing FID per frame"):
                fid_per_frame[key] = metric_fid(self.fid_feats_real_per_frame[key], self.fid_feats_fake_per_frame[key])
            # fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
            fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
            log_dict = {"ssim-t": ssim_t, "psnr-t": psnr_t, "fid-avg": fid_avg, "lpips-t": lpips_t,"ssim-tk": ssim_tk, "psnr-tk": psnr_tk, "lpips-avg": lpips_avg}
            # add histograms for per-frame-metrics
            self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
            self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
            self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
            # self.ssims_per_frame_pl = {key: np.stack(self.ssims_per_frame_pl[key], axis=0).mean() for key in self.ssims_per_frame_pl}
            # self.psnrs_per_frame_pl = {key: np.stack(self.psnrs_per_frame_pl[key], axis=0).mean() for key in self.psnrs_per_frame_pl}
            # 1-based frame indices for the x axis of the per-frame plots
            x = [k+1 for k in self.lpips_per_frame]
            make_plot(x,list(self.lpips_per_frame.values()),"LPIPS of predicted frames", ylabel="Average LPIPS",)
            make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM",)
            make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR",)
            make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID")
            wandb.log(log_dict)
            # clear collection arrays
            self.__clear_metric_arrs()
            # restore device placement for continued training
            self.inception_model.cpu()
            self.lpips_fn.cpu()
            if self.use_temp_disc:
                gan_trainer_temp.disc.cuda(self.all_devices[0])
            if self.use_gan:
                gan_trainer.disc.cuda(self.all_devices[0])
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["test_img_intervall"]))
        def make_test_grid(engine):
            # periodically render qualitative grids/videos on the test set
            test_img_generator.run(test_loader, max_epochs=1, epoch_length=self.config["testing"]["test_it"])
@trainer.on(Events.EPOCH_COMPLETED)
        def log_train_avg(engine):
            """Log per-epoch metric averages to wandb and step the LR schedulers."""
            wandb.log({"epoch": engine.state.epoch})
            [wandb.log({key: engine.state.metrics[key]}) for key in engine.state.metrics]
            # also perform scheduler step
            if self.config["training"]["two_stage"]:
                scheduler_dis.step()
            scheduler_dyn.step()
        def score_fn(engine):
            """Checkpoint score: negated average LPIPS (higher score == better).

            Assumes the metrics handler has already populated self.lpips_avg;
            NOTE(review): this assert is stripped under `python -O`.
            """
            assert self.lpips_avg is not None
            return -self.lpips_avg
# define best ckpt
best_ckpt_handler = ModelCheckpoint(self.dirs["ckpt"],filename_prefix="ckpt_metric" ,score_function=score_fn,score_name="lpips",n_saved=5,require_empty=False)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]),best_ckpt_handler,save_dict)
@trainer.on(Events.STARTED)
        def set_start_it(engine):
            # resume bookkeeping: restore iteration/epoch counters from the
            # loaded checkpoint before the engine starts
            self.logger.info(f'Engine starting from iteration {start_it}, epoch {start_epoch}')
            engine.state.iteration = start_it
            engine.state.epoch = start_epoch
# run everything
n_step_per_epoch = 10 if self.is_debug else len(train_loader)
self.logger.info("Start training...")
trainer.run(train_loader, max_epochs=n_epoch_overall, epoch_length=n_step_per_epoch)
self.logger.info("End training.")
    def test(self):
        """Run the configured testing mode on the held-out dataset.

        Modes (self.config["testing"]["mode"]):
          * "noise_comp": decode noise-perturbed latents and save image strips.
          * "metrics":    compute SSIM/PSNR/LPIPS and per-frame FID over the
                          test set and save summary plots.
          * "fvd":        dump real/fake video sample arrays to .npy files for
                          external FVD computation.

        Raises:
            ValueError: if an unknown testing mode is configured.
        """
        from tqdm import tqdm
        import cv2
        from os import makedirs,path
        # load checkpoint
        mod_ckpt, _ = self._load_ckpt("reg_ckpt", single_opt=False,use_best=self.config["testing"]["best_ckpt"],)
        #dir="/export/data/ablattma/visual_poking/final_models/final_models/var_length/iper/",name="baseline_wo_upsampling.pt")
        dataset, transforms = get_dataset(config=self.config["data"])
        test_dataset = dataset(transforms, self.datakeys, self.config["data"], train=False)
        # get datasets for training and testing
        def w_init_fn(worker_id):
            # re-seed numpy per dataloader worker so workers don't share RNG state
            return np.random.seed(np.random.get_state()[1][0] + worker_id)
        # NOTE(review): batch size is forced to a minimum of 16 here — confirm
        # this is intentional for small configured test batch sizes
        test_sampler = SequenceLengthSampler(test_dataset,
                                             batch_size=self.config["testing"]["test_batch_size"] if self.config["testing"]["test_batch_size"] > 16 else 16,
                                             shuffle=False,
                                             drop_last=True,
                                             zero_poke=False)
        test_loader = DataLoader(
            test_dataset,
            batch_sampler=test_sampler,
            num_workers=0 if self.is_debug else self.config["data"]["num_workers"],
            worker_init_fn=w_init_fn
        )
        # define model
        self.logger.info(f"Load model...")
        net_model = SkipSequenceModel if self.config["architecture"]["use_skip_model"] else ResidualSequenceBaseline
        net = net_model(spatial_size=self.config["data"]["spatial_size"],
                        config=self.config["architecture"],)
        # per-scale noise magnitudes for the "noise_comp" mode (deepest scale gets none)
        weights = [5,5,0]
        self.logger.info(
            f"Number of trainable parameters in model is {sum(p.numel() for p in net.parameters())}"
        )
        net.load_state_dict(mod_ckpt)
        net.cuda(self.all_devices[0])
        self.logger.info("Model on gpu!")
        net.eval()
        if self.config["testing"]["mode"] == "noise_comp":
            target_dir = path.join(self.dirs["generated"], "test_decoder_noise")
            makedirs(target_dir, exist_ok=True)
            n_gen = 0
            for i,batch in enumerate(test_loader):
                # stop after ~100 generated image strips
                if n_gen > 100:
                    break
                if test_dataset.flow_weights:
                    poke = batch["poke"][0].cuda(self.all_devices[0])
                else:
                    poke = batch["poke"].cuda(self.all_devices[0])
                img = batch["images"][:,0].cuda(self.all_devices[0])
                with torch.no_grad():
                    x_rec, sigmas, *_ = net(img,img,poke,len=0)
                    # add noise to sigmas
                    for n in tqdm(range(x_rec.shape[0]),desc=f'Generating noise xs for batch #{i}'):
                        # per latent scale: n_examples_noise noisy copies of this example's latent
                        simgs_noise = [weights[k] * torch.randn((self.config["testing"]["n_examples_noise"],*sigm[n].shape)).cuda(self.all_devices[0])+sigm[n].unsqueeze(0) for k,sigm in enumerate(sigmas)]
                        # xs = torch.stack([img[n]]*self.config["testing"]["n_examples"])
                        simgs_noise.reverse()
                        xs_noise = net.dec(simgs_noise,None,del_shape=True)
                        # horizontal strip: clean reconstruction followed by noisy decodes
                        xs_disp = torch.cat([x_rec[n].unsqueeze(0),xs_noise],dim=0)
                        xs_disp = np.concatenate([((x.permute(1,2,0).cpu().numpy()+ 1.)*127.5).astype(np.uint8) for x in xs_disp],axis=1)
                        n_gen += 1
                        xs_disp = cv2.cvtColor(xs_disp,cv2.COLOR_RGB2BGR)
                        cv2.imwrite(path.join(target_dir,f"noise_imgs_{n_gen}.png"),xs_disp)
        elif self.config["testing"]["mode"] == "metrics":
            # per-frame inception feature stores, filled by metric_step below
            fid_feats_real_per_frame = {}
            fid_feats_fake_per_frame = {}
            def metric_step(engine, eval_batch):
                """Per-batch metric collection (mirrors eval_step from training)."""
                net.eval()
                out_dict = {}
                with torch.no_grad():
                    # prepare data
                    weights = None
                    if test_dataset.flow_weights:
                        poke = eval_batch["poke"][0].cuda(self.all_devices[0])
                        weights = eval_batch["poke"][1].cuda(self.all_devices[0])
                    else:
                        poke = eval_batch["poke"].cuda(self.all_devices[0])
                    x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
                    x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
                    # eval forward passes
                    seq_len = x_seq_gt.shape[1]
                    x_t_hat, sigma_t, _, alpha = net(x_t, x_t, poke, len=0)
                    x_seq_hat, _, sigmas_hat, _ = net(x_t, x_t, poke, len=seq_len)
                    if weights is not None and self.config["testing"]["metrics_on_patches"]:
                        x_seq_hat = get_patches(x_seq_hat, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
                        x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
                    # apply inception model for fid calculation at time t+k
                    for t in range(x_seq_gt.shape[1]):
                        real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
                        fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
                        if t not in fid_feats_fake_per_frame:
                            fid_feats_fake_per_frame.update({t:fake_features_t})
                            fid_feats_real_per_frame.update({t: real_features_t})
                        else:
                            fid_feats_fake_per_frame[t] = np.concatenate([fid_feats_fake_per_frame[t],fake_features_t], axis=0)
                            fid_feats_real_per_frame[t] = np.concatenate([fid_feats_real_per_frame[t], real_features_t], axis=0)
                    #self.features_real_fid["tk"].append(real_features_tk)
                    #self.features_fake_fid["tk"].append(fake_features_tk)
                    self.fvd_vids_real.append(x_seq_gt.cpu())
                    self.fvd_vids_fake.append(x_seq_hat.cpu())
                    ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                    psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
                    lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat, self.lpips_fn, reduce=False, return_per_frame=True)
                    # append to arrays
                    self.psnrs["tk"].append(psnr_tk)
                    self.ssims["tk"].append(ssim_tk)
                    self.lpips["tk"].append(lpips_avg)
                    # append the values of the respective sequence length
                    [self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key: [ssim_per_frame[key]]}) for key in ssim_per_frame]
                    [self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key: [psnr_per_frame[key]]}) for key in psnr_per_frame]
                    [self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key: [lpips_per_frame[key]]}) for key in lpips_per_frame]
                return out_dict
            evaluator = Engine(metric_step)
            self.logger.info("Initialize inception model...")
            self.inception_model = FIDInceptionModel()
            self.logger.info("Finished initialization of inception model...")
            # note that lpips is exactly vgg-cosine similarity as proposed in the google papers and savp
            self.lpips_fn = LPIPS(net="vgg")
            pbar = ProgressBar(ascii=True)
            pbar.attach(evaluator, output_transform=lambda x: x)
            # move the inception and LPIPS models to the GPU
            self.inception_model.eval()
            self.inception_model.cuda(self.all_devices[0])
            self.lpips_fn.cuda(self.all_devices[0])
            self.lpips_fn.eval()
            # compute metrics over an epoch
            self.logger.info(f"Start metrics computation.")
            # cap evaluation at ~8000 samples
            el = (int(8000 / test_sampler.batch_size) if len(test_dataset) > 8000 else len(test_loader))
            evaluator.run(test_loader, max_epochs=1, epoch_length=el)
            # [wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
            # compute metrics
            ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
            psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
            lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
            assert list(fid_feats_real_per_frame.keys()) == list(fid_feats_fake_per_frame.keys())
            fid_per_frame = {}
            for key in tqdm(fid_feats_real_per_frame, desc="Computing FID per frame"):
                fid_per_frame[key] = metric_fid(fid_feats_real_per_frame[key], fid_feats_fake_per_frame[key])
            #fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
            fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
            log_dict = {"ssim-avg-temp": ssim_tk, "psnr-avg_temp": psnr_tk, "fid-avg_temp": fid_avg, "lpips-avg-temp": lpips_avg}
            # add histograms for per-frame-metrics
            self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
            self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
            self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
            savedir = path.join(self.dirs["generated"],"metric_summaries")
            makedirs(savedir, exist_ok=True)
            # 1-based frame indices for plotting
            x = [k+1 for k in self.lpips_per_frame]
            make_plot(x,list(self.lpips_per_frame.values()),"LPIPS of predicted frames", ylabel="Average LPIPS",savename=path.join(savedir,"lpips.svg"))
            make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM", savename=path.join(savedir, "ssim.svg"))
            make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR", savename=path.join(savedir, "psnr.svg"))
            make_plot(x,list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID", savename=path.join(savedir, "fid.svg"))
            self.logger.info("Averaged metrics: ")
            for key in log_dict:
                self.logger.info(f'{key}: {log_dict[key]}')
        elif self.config["testing"]["mode"] == "fvd":
            # fixed-length (10-frame) shuffled batches of 16 for FVD sampling
            test_sampler = SequenceLengthSampler(test_dataset,16,shuffle=True, drop_last=True, n_frames=10,zero_poke=False)
            test_loader = DataLoader(test_dataset,batch_sampler=test_sampler,num_workers=self.config["data"]["num_workers"],worker_init_fn=w_init_fn)
            real_samples = []
            fake_samples = []
            def generate_vids(engine,eval_batch):
                """Collect uint8 real/predicted sequences (B,T,H,W,C) for FVD."""
                net.eval()
                with torch.no_grad():
                    # prepare data
                    if test_dataset.flow_weights:
                        poke = eval_batch["poke"][0].cuda(self.all_devices[0])
                    else:
                        poke = eval_batch["poke"].cuda(self.all_devices[0])
                    x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
                    x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
                    # eval forward passes
                    seq_len = x_seq_gt.shape[1]
                    x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len)
                    # [-1,1] float -> [0,255] uint8, channels last
                    real_batch = ((x_seq_gt + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
                    fake_batch = ((x_seq_hat + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy().astype(np.uint8)
                    real_samples.append(real_batch)
                    fake_samples.append(fake_batch)
            generator = Engine(generate_vids)
            pbar = ProgressBar(ascii=True)
            pbar.attach(generator, output_transform=lambda x: x)
            self.logger.info(f"Start collecting sequences for fvd computation...")
            # cap at ~1000 samples
            el = (int(1000 / test_sampler.batch_size) if len(test_dataset) > 1000 else len(test_loader))
            generator.run(test_loader,max_epochs=1,epoch_length=el)
            savedir = path.join(self.dirs["generated"],"samples_fvd")
            makedirs(savedir,exist_ok=True)
            real_samples = np.stack(real_samples,axis=0)
            fake_samples = np.stack(fake_samples, axis=0)
            np.save(path.join(savedir,"real_samples.npy"),real_samples)
            np.save(path.join(savedir,"fake_samples.npy"),fake_samples)
            self.logger.info(f'Finish generation of vid samples.')
        else:
            raise ValueError(f'Specified testing mode "{self.config["testing"]["mode"]}" does not exist.')
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Minimal Reference implementation for the Frechet Video Distance (FVD).
FVD is a metric for the quality of video generation models. It is inspired by
the FID (Frechet Inception Distance) used for images, but uses a different
embedding to be better suitable for videos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
import tensorflow_hub as hub
def preprocess(videos, target_resolution):
    """Resize and rescale videos for the I3D embedding network.

    Args:
      videos: <T>[batch_size, num_frames, height, width, depth] tensor. Any
        dtype accepted by tf.image.resize_bilinear; values are expected to be
        in the range 0-255.
      target_resolution: (width, height): target video resolution.

    Returns:
      videos: <float32>[batch_size, num_frames, height, width, depth] scaled
        to the range [-1, 1].
    """
    shape = videos.shape.as_list()
    # Collapse batch and time so all frames can be resized with one op.
    frames = tf.reshape(videos, [-1] + shape[-3:])
    resized = tf.image.resize_bilinear(frames, size=target_resolution)
    # Restore the (batch, time) split and map [0, 255] -> [-1, 1].
    restored = tf.reshape(resized, [shape[0], -1] + list(target_resolution) + [3])
    return 2. * tf.cast(restored, tf.float32) / 255. - 1
def _is_in_graph(tensor_name):
    """Return True iff a tensor with the given name exists in the default graph."""
    graph = tf.get_default_graph()
    try:
        graph.get_tensor_by_name(tensor_name)
        return True
    except KeyError:
        return False
class Embedder:
    """Wrapper around the I3D (Kinetics-400) TF-Hub module used for FVD.

    The hub module is instantiated once in ``__init__`` under a name derived
    from the input tensor, so repeated embedding calls reuse the same graph
    nodes instead of importing the module again.
    """

    def __init__(self, videos):
        # Derive a unique module name from the input tensor so that graphs
        # built for different input tensors do not collide.
        module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str(
            videos.name).replace(":", "_")
        module_spec = "https://tfhub.dev/deepmind/i3d-kinetics-400/1"
        self.model = hub.Module(module_spec, name=module_name)

    def create_id3_embedding(self, videos):
        """Embeds the given videos using the Inflated 3D Convolution network.

        Adds the I3D graph (downloaded from tf.hub on the first call) to the
        default graph and returns the pre-logits activations.

        Args:
          videos: <float32>[batch_size, num_frames, height=224, width=224,
            depth=3]. Expected range is [-1, 1].

        Returns:
          embedding: <float32>[batch_size, embedding_size]. embedding_size
            depends on the model used.

        Raises:
          ValueError: when a provided embedding_layer is not supported.
        """
        batch_size = 16
        # Making sure that we import the graph separately for
        # each different input video tensor.
        # (The unused local `module_spec` from the original was removed; the
        # hub module itself is created once in __init__.)
        module_name = "fvd_kinetics-400_id3_module_" + six.ensure_str(
            videos.name).replace(":", "_")

        # Runtime sanity checks on the input range and batch size.
        assert_ops = [
            tf.Assert(
                tf.reduce_max(videos) <= 1.001,
                ["max value in frame is > 1", videos]),
            tf.Assert(
                tf.reduce_min(videos) >= -1.001,
                ["min value in frame is < -1", videos]),
            tf.assert_equal(
                tf.shape(videos)[0],
                batch_size, ["invalid frame batch size: ",
                             tf.shape(videos)],
                summarize=6),
        ]
        with tf.control_dependencies(assert_ops):
            videos = tf.identity(videos)

        module_scope = "%s_apply_default/" % module_name

        # To check whether the module has already been loaded into the graph, we look
        # for a given tensor name. If this tensor name exists, we assume the function
        # has been called before and the graph was imported. Otherwise we import it.
        # Note: in theory, the tensor could exist, but have wrong shapes.
        # This will happen if create_id3_embedding is called with a frames_placehoder
        # of wrong size/batch size, because even though that will throw a tf.Assert
        # on graph-execution time, it will insert the tensor (with wrong shape) into
        # the graph. This is why we need the following assert.
        video_batch_size = int(videos.shape[0])
        assert video_batch_size in [batch_size, -1, None], "Invalid batch size"
        tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
        if not _is_in_graph(tensor_name):
            # Apply the module created in __init__ to insert its ops.
            self.model(videos)

        # gets the kinetics-i3d-400-logits layer
        tensor_name = module_scope + "RGB/inception_i3d/Mean:0"
        tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)
        return tensor
def calculate_fvd(real_activations,
                  generated_activations):
    """Compute the Frechet Video Distance from precomputed activations.

    Args:
      real_activations: <float32>[num_samples, embedding_size]
      generated_activations: <float32>[num_samples, embedding_size]

    Returns:
      A scalar that contains the requested FVD.
    """
    fvd = tfgan.eval.frechet_classifier_distance_from_activations(
        real_activations, generated_activations)
    return fvd
| 5,688 | 35.941558 | 83 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/losses.py | import torch
from torch import nn
from torchvision.models import vgg19
from collections import namedtuple
from operator import mul
from functools import reduce
from utils.general import get_member
# Feature maps extracted from VGG19 for the perceptual (content) loss;
# "input" is the (normalized) image itself for a pixel-space term.
VGGOutput = namedtuple(
    "VGGOutput",
    ["input", "relu1_2", "relu2_2", "relu3_2", "relu4_2", "relu5_2"],
)
# Feature maps used for the Gram-matrix based style loss.
StyleLayers = namedtuple("StyleLayers",["relu1_2","relu2_2","relu3_3", "relu4_3"])
class PerceptualVGG(nn.Module):
    """VGG19 feature extractor used for perceptual and style losses.

    ``forward`` returns a dict mapping layer names (see ``target_layers``)
    to feature maps; the raw normalized input is included under "input" so
    callers can add a plain pixel-space reconstruction term.
    Input images are expected in [-1, 1].
    """

    def __init__(self, weights=None):
        """
        :param weights: optional dict of per-layer loss weights keyed by the
            layer names "input", "relu1_2", "relu2_2", "relu3_2", "relu3_3",
            "relu4_2", "relu4_3", "relu5_2". Defaults to 1.0 everywhere.
        """
        super().__init__()
        self.vgg = vgg19(pretrained=True)
        self.vgg.eval()
        self.vgg_layers = self.vgg.features
        # ImageNet statistics, shaped (1, 3, 1, 1) for broadcasting over NCHW.
        self.register_buffer(
            "mean",
            torch.tensor([0.485, 0.456, 0.406], dtype=torch.float)
            .unsqueeze(dim=0)
            .unsqueeze(dim=-1)
            .unsqueeze(dim=-1),
        )
        self.register_buffer(
            "std",
            torch.tensor([0.229, 0.224, 0.225], dtype=torch.float)
            .unsqueeze(dim=0)
            .unsqueeze(dim=-1)
            .unsqueeze(dim=-1),
        )
        # Indices of vgg19.features submodules -> human-readable layer names.
        self.target_layers = {
            "3": "relu1_2",
            "8": "relu2_2",
            "13": "relu3_2",
            "15": "relu3_3",
            "22": "relu4_2",
            "24": "relu4_3",
            "31": "relu5_2",
        }
        if weights is None:
            self.loss_weights = {"input": 1., "relu1_2": 1., "relu2_2": 1., "relu3_2": 1., "relu3_3": 1., "relu4_2": 1., "relu4_3": 1., "relu5_2": 1.}
        else:
            # BUGFIX: validate against the layer *names* the loss functions
            # actually index with (loss_weights[VGGOutput._fields[i]]); the
            # original asserted against the numeric submodule indices
            # ("3", "8", ...), so any dict passing that assert would raise
            # KeyError when the weights were later looked up.
            expected = ["input"] + list(self.target_layers.values())
            assert isinstance(weights, dict) and sorted(weights.keys()) == sorted(expected), \
                f"The weights passed to PerceptualVGG have to be a dict with the keys {expected}"
            self.loss_weights = weights

    def forward(self, x):
        # IMPORTANT: Input is assumed to be in range [-1,1] here.
        x = (x + 1.0) / 2.0
        x = (x - self.mean) / self.std
        # add also common reconstruction loss in pixel space
        out = {"input": x}
        for name, submodule in self.vgg_layers._modules.items():
            # Both branches of the original if/else ran the submodule; do it once.
            x = submodule(x)
            if name in self.target_layers:
                out[self.target_layers[name]] = x
        return out
def vgg_loss(custom_vgg:PerceptualVGG, target, pred, weights=None):
    """
    Implements a vgg based perceptual loss, as extensively used for image/video generation tasks.

    :param custom_vgg: The vgg feature extractor for the perceptual loss, definition see above
    :param target: ground-truth image batch in [-1, 1]
    :param pred: predicted image batch in [-1, 1]
    :param weights: optional per-pixel weight map, applied only to the
        pixel-space ("input") term
    :return: dict mapping layer names to 1-element loss tensors
    """
    target_feats = custom_vgg(target)
    pred_feats = custom_vgg(pred)
    target_feats = VGGOutput(**{key: target_feats[key] for key in VGGOutput._fields})
    pred_feats = VGGOutput(**{key: pred_feats[key] for key in VGGOutput._fields})
    names = list(pred_feats._asdict().keys())
    loss_weights = get_member(custom_vgg, "loss_weights")
    if weights is None:
        losses = {}
        for i, (tf, pf) in enumerate(zip(target_feats, pred_feats)):
            loss = loss_weights[VGGOutput._fields[i]] * torch.mean(
                torch.abs(tf - pf)
            ).unsqueeze(dim=-1)
            losses.update({names[i]: loss})
    else:
        # Pixel-space term, weighted per spatial location.
        losses = {
            names[0]: loss_weights[VGGOutput._fields[0]]
            * torch.mean(weights * torch.abs(target_feats[0] - pred_feats[0]))
            .unsqueeze(dim=-1)
            .to(torch.float)
        }
        for i, (tf, pf) in enumerate(zip(target_feats[1:], pred_feats[1:])):
            # BUGFIX: loss_weights is a dict keyed by layer names; the
            # original indexed it with the integer `i + 1`, raising KeyError
            # whenever a per-pixel weight map was supplied.
            loss = loss_weights[VGGOutput._fields[i + 1]] * torch.mean(
                torch.abs(tf - pf)
            ).unsqueeze(dim=-1)
            losses.update({names[i + 1]: loss})
    return losses
def vgg_loss_agg(vgg, target, pred, weights=None):
    """
    Aggregate the per-layer VGG perceptual losses into a single scalar.

    :param vgg: the VGG feature extractor
    :param target: ground-truth batch
    :param pred: prediction batch
    :param weights: optional per-pixel weights, forwarded to vgg_loss
    :return: scalar tensor, sum over all layer losses
    """
    per_layer = vgg_loss(vgg, target, pred, weights)
    stacked = torch.stack([per_layer[name] for name in per_layer], dim=0)
    return stacked.sum()
class PixelDynamicsLoss(nn.Module):
    """Penalizes mismatch between the magnitude of ground-truth frame
    differences and predicted frame differences in pixel space."""

    def __init__(self, diff_pp=False):
        """
        :param diff_pp: if True, compare absolute differences per pixel before
            averaging; otherwise compare the averaged magnitudes.
        """
        super().__init__()
        self.diff_pp = diff_pp

    def forward(self, target_t, target_tk, pred_t, pred_tk):
        true_delta = (target_t - target_tk).abs()
        # Gradients flow only through pred_tk; pred_t is treated as constant.
        pred_delta = (pred_t.detach() - pred_tk).abs()
        if self.diff_pp:
            return (true_delta - pred_delta).mean() ** 2
        return (true_delta.mean() - pred_delta.mean()) ** 2
def pixel_triplet_loss(target_t,target_tk,pred_t, pred_tk,vgg:PerceptualVGG,layerwise = True, detach=True, diff_pp=False):
    """
    Matches the perceptual change between two time steps of the target
    sequence with the corresponding change in the predicted sequence.

    :param target_t: ground-truth frame at time t
    :param target_tk: ground-truth frame at time t+k
    :param pred_t: predicted frame at time t
    :param pred_tk: predicted frame at time t+k
    :param vgg: perceptual feature extractor
    :param layerwise: if True, compare temporal differences per VGG layer;
        otherwise compare aggregated perceptual losses
    :param detach: whether or not to detach the predicted feats at time t
    :param diff_pp: whether to consider differences for each spatial location in each channel or average over all (default average)
    :return: scalar loss tensor
    """
    if layerwise:
        losses = {}
        # timestep t
        # BUGFIX: the original moved only target_t to CUDA (`target_t.cuda()`,
        # a leftover from commented-out device juggling) while target_tk and
        # the predictions stayed on their incoming device — this breaks
        # CPU-only execution and can mismatch devices. Keep all inputs on the
        # device they arrive on, consistent with vgg_loss above.
        target_feats_t = vgg(target_t)
        pred_feats_t = vgg(pred_t.detach() if detach else pred_t)
        target_feats_t = VGGOutput(**{key: target_feats_t[key] for key in VGGOutput._fields})
        pred_feats_t = VGGOutput(**{key: pred_feats_t[key] for key in VGGOutput._fields})

        # timestep tk
        target_feats_tk = vgg(target_tk)
        pred_feats_tk = vgg(pred_tk)
        target_feats_tk = VGGOutput(**{key: target_feats_tk[key] for key in VGGOutput._fields})
        pred_feats_tk = VGGOutput(**{key: pred_feats_tk[key] for key in VGGOutput._fields})

        names = list(pred_feats_t._asdict().keys())
        for i, (tft, pft, tftk, pftk) in enumerate(zip(target_feats_t, pred_feats_t, target_feats_tk, pred_feats_tk)):
            weight = get_member(vgg, "loss_weights")[VGGOutput._fields[i]]
            if diff_pp:
                # Per-location squared difference of temporal deltas.
                loss = weight * torch.mean((torch.abs(tft - tftk) - torch.abs(pft - pftk)) ** 2).unsqueeze(dim=-1)
            else:
                # Squared difference of the averaged delta magnitudes.
                loss = weight * (torch.mean(torch.abs(tft - tftk)).unsqueeze(dim=-1) - torch.mean(torch.abs(pft - pftk)).unsqueeze(dim=-1)) ** 2
            losses.update({names[i]: loss})

        loss_tensor = torch.stack([losses[key] for key in losses], dim=0)
        ptl = loss_tensor.sum()
    else:
        ptl = (vgg_loss_agg(vgg, pred_t.detach(), pred_tk) - vgg_loss_agg(vgg, target_t, target_tk)) ** 2
    return ptl
def style_loss(vgg,style_target, pred):
    """Gram-matrix style loss between ``style_target`` and ``pred``, computed
    over the layers in ``StyleLayers`` and summed across layers."""
    feats_tgt = vgg(style_target)
    feats_pred = vgg(pred)
    feats_tgt = StyleLayers(**{key: feats_tgt[key] for key in StyleLayers._fields})
    feats_pred = StyleLayers(**{key: feats_pred[key] for key in StyleLayers._fields})

    layer_losses = []
    # Squared Frobenius norm of the difference of normalized Gram matrices.
    for tgt_f, pred_f in zip(feats_tgt, feats_pred):
        b, c, h, w = pred_f.shape
        pred_flat = pred_f.reshape(b, c, -1)
        tgt_flat = tgt_f.reshape(b, c, -1)
        gram_diff = 1. / (c * h * w) * (
            torch.matmul(pred_flat, pred_flat.permute(0, 2, 1))
            - torch.matmul(tgt_flat, tgt_flat.permute(0, 2, 1))
        )
        layer_losses.append((torch.norm(gram_diff, p="fro", dim=[1, 2]) ** 2).mean())
    return torch.stack(layer_losses).sum()
class DynamicsLoss(nn.Module):
    """
    Triplet loss
    Takes embeddings of an anchor sample, a positive sample and a negative sample
    """

    def __init__(self, config):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, anchor, positive, negative, ):
        # NOTE: only anchor/positive enter the loss; `negative` is accepted
        # for interface compatibility but unused.
        if isinstance(anchor, list) and isinstance(positive, list):
            pairwise = [self.mse(a, p) for a, p in zip(anchor, positive)]
            return torch.stack(pairwise).mean()
        return self.mse(anchor, positive)
def kl_loss_check(latents):
    """
    Estimates a gaussian from the latents and returns the kl_divergence between this gaussian and the standard normal
    :param latents: (B, 2*D) tensor; the first half of dim 1 is the mean, the
        second half parameterizes the log-std via a sigmoid
    :return: scalar KL loss
    """
    half = int(latents.shape[1] / 2)
    mu = latents[:, :half]
    sigma = latents[:, half:]
    # reparameterize: squash the raw values into a bounded log-std
    logstd = nn.Sigmoid()(sigma)
    return kl_loss(mu, logstd)
def kl_loss(mu, logstd):
    """KL divergence between N(mu, exp(logstd)^2) and the standard normal,
    averaged over the batch; inputs with more than 2 dims are flattened
    per sample."""
    if mu.dim() != 2:
        mu = mu.reshape(mu.shape[0], -1)
        logstd = logstd.reshape(mu.shape[0], -1)
    latent_dim = mu.shape[1]
    variance = torch.exp(logstd) ** 2
    per_sample = torch.sum(0.5 * (variance + mu ** 2) - logstd, dim=-1) - 0.5 * latent_dim
    return per_sample.mean()
| 9,190 | 33.423221 | 210 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/metric_fvd.py | import numpy as np
import argparse
from os import path
import torch
import ssl
from glob import glob
from natsort import natsorted
ssl._create_default_https_context = ssl._create_unverified_context
import cv2
from utils.metrics import compute_fvd
from utils.general import get_logger
if __name__ == '__main__':
    # Stand-alone evaluation script: loads previously dumped real/fake video
    # sample arrays from --source and computes the FVD between them.
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--source", type=str,
                        required=True,
                        help="Source directory where the data is stored.")
    parser.add_argument("--gpu",type=int, required=True, help="The target device.")
    parser.add_argument("-v","--visualize",default=False,action="store_true")
    args = parser.parse_args()
    if not path.isdir(args.source):
        raise NotADirectoryError(f'The specified, data-holding directory {args.source} is not existing...')
    file = path.basename(__file__)
    logger = get_logger(file)
    logger.info("Read in data...")
    # Paired per-configuration dumps, e.g. real_samples_<length>_..._<context>.npy.
    real_samples_list = natsorted(glob(path.join(args.source, "real_samples_*.npy")))
    fake_samples_list = natsorted(glob(path.join(args.source, "fake_samples_*.npy")))
    if len(real_samples_list) == 0:
        # Fall back to the un-suffixed default dump names.
        fake_samples_list = [path.join(args.source, "fake_samples.npy")]
        real_samples_list = [path.join(args.source, "real_samples.npy")]
    for i,(real_samples, fake_samples) in enumerate(zip(real_samples_list,fake_samples_list)):
        try:
            # Filename layout: <kind>_samples_<length>_..._<context>.npy.
            # NOTE(review): the bare except below swallows every parsing
            # error, not just ValueError/IndexError — intentional best-effort.
            length = int(real_samples.split("/")[-1].split(".")[0].split("_")[2])
            context = int(real_samples.split("/")[-1].split(".npy")[0].split("_")[-1])
            logger.info(f"processing samples of length {length} with {context} context frames.")
        except:
            logger.info(f"Processing standard samples")
        real_samples = np.load(real_samples)
        fake_samples = np.load(fake_samples)
        if args.visualize:
            # Dump the first real/fake sequence as mp4 for a visual sanity check.
            vis_real = real_samples[0,0]
            vis_fake = fake_samples[0,0]
            # visualize
            writer = cv2.VideoWriter(
                path.join(args.source, "test_vid_fake.mp4"),
                cv2.VideoWriter_fourcc(*"MP4V"),
                5,
                (vis_fake.shape[2], vis_fake.shape[1]),
            )
            # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
            for frame in vis_fake:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                writer.write(frame)
            writer.release()
            writer = cv2.VideoWriter(
                path.join(args.source, "test_vid_real.mp4"),
                cv2.VideoWriter_fourcc(*"MP4V"),
                5,
                (vis_real.shape[2], vis_real.shape[1]),
            )
            # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
            for frame in vis_real:
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                writer.write(frame)
            writer.release()
        # Cap at 62 samples to bound memory/runtime of the FVD computation.
        real_samples = real_samples[:62] if real_samples.shape[0] > 62 else real_samples
        fake_samples = fake_samples[:62] if fake_samples.shape[0] > 62 else fake_samples
        logger.info(f'Number of samples: {len(fake_samples)}')
        target_device = args.gpu
        real_samples = list(real_samples)
        fake_samples = list(fake_samples)
        real_samples = [torch.from_numpy(r) for r in real_samples]
        fake_samples = [torch.from_numpy(r) for r in fake_samples]
        fvd_val = compute_fvd(real_samples,fake_samples,target_device,logger)
| 3,569 | 30.59292 | 107 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/testing.py | import numpy as np
import torch
from skimage.metrics import structural_similarity as ssim
import cv2
import math
import imutils
import matplotlib.pyplot as plt
import wandb
from os import path
import math
def make_flow_grid(src, poke, pred, tgt, n_logged, flow=None):
    """
    Builds one static image grid with rows: source, poke, prediction, target,
    SSIM map, and (optionally) the complete flow.

    :param src: source images, (B, 3, H, W) in [-1, 1]
    :param poke: poke flow maps, (B, 2, H, W)
    :param pred: predicted images (B, 3, H, W) in [-1, 1] or flow (B, 2, H, W)
    :param tgt: target images, (B, 3, H, W) in [-1, 1]
    :param n_logged: number of examples shown in the grid
    :param flow: optional complete flow src -> tgt, (B, 2, H, W)
    :return: uint8 grid image with the rows stacked vertically
    """
    src = ((src.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    poke = vis_flow(poke[:n_logged])
    poke = np.concatenate(poke, axis=1)
    # if prediction is image, just take the permuted image
    if pred.shape[1] == 3:
        pred = ((pred.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(
            np.uint8)[:n_logged]
    else:
        # if prediction is flow, min/max-normalize and pad a zero third channel
        pred = pred.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
        pred -= pred.min()
        pred /= pred.max()
        pred = (pred * 255.0).astype(np.uint8)
        pred = np.concatenate([pred, np.expand_dims(np.zeros_like(pred).sum(-1), axis=-1)], axis=-1).astype(np.uint8)
    tgt = ((tgt.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(
        np.uint8)[:n_logged]

    # compute ssim_img in grayscale
    tgt_gr = [cv2.cvtColor(t, cv2.COLOR_RGB2GRAY) for t in tgt]
    pred_gr = [cv2.cvtColor(t, cv2.COLOR_RGB2GRAY) for t in pred]
    # BUGFIX: the inputs are single-channel grayscale (2-D) images, so SSIM
    # must be computed single-channel; the original passed multichannel=True,
    # which makes skimage treat the width axis as channels.
    ssim_imgs = [
        ssim(rimg, fimg, data_range=255, gaussian_weights=True, use_sample_covariance=False, full=True)[1]
        for rimg, fimg in zip(tgt_gr, pred_gr)
    ]
    # NOTE(review): the SSIM map can contain negative values; the uint8 cast
    # below assumes non-negative values — confirm acceptable for visualization.
    additional = [np.concatenate([cv2.cvtColor((s * 255.).astype(np.uint8), cv2.COLOR_GRAY2RGB) for s in ssim_imgs], axis=1)]
    if flow is not None:
        # if provided, show the entire flow src --> tgt as an extra row
        add = vis_flow(flow[:n_logged])
        add = np.concatenate(add, axis=1)
        additional = additional + [add]

    src = np.concatenate([s for s in src], axis=1)
    pred = np.concatenate([p for p in pred], axis=1)
    tgt = np.concatenate([t for t in tgt], axis=1)
    grid = np.concatenate([src, poke, pred, tgt, *additional], axis=0)
    return grid
def vis_flow(flow_map, normalize=False):
    """
    Renders 2-channel flow fields as RGB images via HSV encoding
    (hue = direction, value = magnitude).

    :param flow_map: (B, 2, H, W) tensor or ndarray of flow vectors
    :param normalize: if True, additionally min/max-normalize the RGB output
    :return: list of (H, W, 3) uint8 RGB images, one per batch element
    """
    if isinstance(flow_map, torch.Tensor):
        flow_map = flow_map.cpu().numpy()
    flows_vis = []
    for flow in flow_map:
        hsv = np.zeros((*flow.shape[1:], 3), dtype=np.uint8)
        hsv[..., 1] = 255
        mag, ang = cv2.cartToPolar(flow[0], flow[1])
        # since 360 is not valid for uint8, 180° corresponds to 360° for opencv hsv representation. Therefore, we're dividing the angle by 2 after conversion to degrees
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        as_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        if normalize:
            # BUGFIX: np.float was removed in NumPy 1.24; the builtin float
            # has the same semantics (float64) and works on all versions.
            as_rgb = as_rgb.astype(float) - as_rgb.min(axis=(0, 1), keepdims=True)
            as_rgb = (as_rgb / as_rgb.max(axis=(0, 1), keepdims=True) * 255.).astype(np.uint8)
        flows_vis.append(as_rgb)
    return flows_vis
def vis_flow_dense(flow_map,**kwargs):
    """HSV-based flow visualization; returns one BGR uint8 image per batch
    element (hue = direction, value = magnitude)."""
    if isinstance(flow_map, torch.Tensor):
        flow_map = flow_map.cpu().numpy()
    rendered = []
    for field in flow_map:
        height, width = field.shape[1:]
        fx, fy = field[0], field[1]
        angle = np.arctan2(fy, fx) + np.pi
        magnitude = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((height, width, 3), np.uint8)
        hsv[..., 0] = angle * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = cv2.normalize(magnitude, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        rendered.append(cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR))
    return rendered
def make_trf_video(img1,img2,v12,v21,poke,n_logged,logwandb=True,length_divisor=5):
    """
    Builds a video grid for appearance-transfer results: the two input images
    (with the mean poke direction drawn as an arrow) stacked over the two
    transferred videos.

    :param img1: first input image batch, (B, 3, H, W) in [-1, 1]
    :param img2: second input image batch, (B, 3, H, W) in [-1, 1]
    :param v12: video with foreground 1 / background 2, (B, T, 3, H, W)
    :param v21: video with foreground 2 / background 1, (B, T, 3, H, W)
    :param poke: poke flow maps, (B, 2, H, W)
    :param n_logged: number of logged examples
    :param logwandb: if True, move channels for wandb video logging (T, C, H, W)
    :param length_divisor: divisor controlling the drawn arrow length
    :return: uint8 video grid
    """
    seq_len = v12.shape[1]
    pokes = vis_flow(poke[:n_logged])
    img1 = ((img1.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    img2 = ((img2.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    img1_with_arrow = []
    img2_with_arrow = []
    eps = 1e-6
    for i, (poke_p, img1_i, img2_i) in enumerate(zip(poke[:n_logged], img1, img2)):
        poke_points = np.nonzero(pokes[i].any(-1) > 0)
        if poke_points[0].size == 0:
            img1_with_arrow.append(img1_i)
            img2_with_arrow.append(img2_i)
        else:
            min_y = np.amin(poke_points[0])
            max_y = np.amax(poke_points[0])
            min_x = np.amin(poke_points[1])
            max_x = np.amax(poke_points[1])
            # plot mean direction of flow in poke region
            avg_flow = np.mean(poke_p[:, min_y:max_y, min_x:max_x].cpu().numpy(), axis=(1, 2))
            arrow_dir = avg_flow / (np.linalg.norm(avg_flow) + eps) * (poke_p.shape[1] / length_divisor)
            if not math.isnan(arrow_dir[0]) or not math.isnan(arrow_dir[1]):
                arrow_start = (int((min_x + max_x) / 2), int((min_y + max_y) / 2))
                arrow_end = (arrow_start[0] + int(arrow_dir[0]), arrow_start[1] + int(arrow_dir[1]))
                img1_with_arrow.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(img1_i), arrow_start, arrow_end, (255, 0, 0), max(int(img1_i.shape[0] / 64), 1))))
                img2_with_arrow.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(img2_i), arrow_start, arrow_end, (255, 0, 0), max(int(img2_i.shape[0] / 64), 1))))
            else:
                # BUGFIX: the original appended to the numpy arrays img1/img2
                # here (AttributeError at runtime); the unannotated frames
                # belong in the output lists instead.
                img1_with_arrow.append(img1_i)
                img2_with_arrow.append(img2_i)
    vid_st1 = np.concatenate(img1_with_arrow, axis=1)
    vid_st2 = np.concatenate(img2_with_arrow, axis=1)
    vid_st1 = put_text_to_video_row(np.stack([vid_st1] * seq_len, axis=0), "Image 1", color=(255, 0, 0))
    vid_st2 = put_text_to_video_row(np.stack([vid_st2] * seq_len, axis=0), "Image 2", color=(255, 0, 0))
    v12 = ((v12.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    v12 = np.concatenate(list(v12), axis=2)
    v12 = put_text_to_video_row(v12, "Vid: FG1-BG2")
    v21 = ((v21.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    v21 = np.concatenate(list(v21), axis=2)
    v21 = put_text_to_video_row(v21, "Vid: FG2-BG1")
    full = np.concatenate([vid_st1, vid_st2, v12, v21], axis=1)
    if logwandb:
        full = np.moveaxis(full, [0, 1, 2, 3], [0, 2, 3, 1])
    return full
def draw_arrow(traj):
    """
    Draws each nonzero 2D displacement in ``traj`` as a red arrow on a black
    canvas, one canvas per batch element, concatenated horizontally.

    :param traj: (B, 2, H, W) array of displacement vectors
    :return: (H, B*W, 3) uint8 image
    """
    arrow_imgs = []
    for t in traj:
        active_points = np.nonzero(t.astype(np.uint8).any(0) > 0)
        img = np.zeros((*t.shape[1:], 3), dtype=np.uint8)
        if active_points[0].size > 0:
            for i in range(active_points[0].shape[0]):
                y = active_points[0][i]
                x = active_points[1][i]
                arrow_dir = t[:, y, x]
                if not math.isnan(arrow_dir[0]) or not math.isnan(arrow_dir[1]):
                    arrow_start = (x, y)
                    # BUGFIX: clip x against the image width (shape[1]) and y
                    # against the height (shape[0]); the original clipped both
                    # against shape[0], which is wrong for non-square frames.
                    arrow_end = (int(np.clip(x + int(arrow_dir[0]), 0, img.shape[1])), int(np.clip(y + int(arrow_dir[1]), 0, img.shape[0])))
                    # NOTE(review): traj.shape[1] is the channel dim (2), so
                    # this thickness is always 1 — shape[2] may be intended;
                    # behavior kept as-is.
                    img = cv2.arrowedLine(img, arrow_start, arrow_end, (255, 0, 0), max(int(traj.shape[1] / 64), 1))
        arrow_imgs.append(img)
    arrow_imgs = np.concatenate(arrow_imgs, axis=1)
    return arrow_imgs
def img_grid_ci(src,traj,pred,tgt,n_logged):
    """Builds a static image grid (source / arrows / prediction / target rows)
    for models driven by sparse control inputs; images expected in [0, 1]."""
    def _row(batch):
        # (B, C, H, W) in [0, 1] -> horizontally concatenated uint8 row.
        imgs = ((batch.permute(0, 2, 3, 1).cpu().numpy()) * 255.).astype(np.uint8)[:n_logged]
        return np.concatenate([im for im in imgs], axis=1)

    src_row = _row(src)
    pred_row = _row(pred)
    tgt_row = _row(tgt)
    arrow_row = draw_arrow(traj[:n_logged].cpu().numpy())
    return np.concatenate([src_row, arrow_row, pred_row, tgt_row], axis=0)
def make_video_ci(src,traj,pred,tgt,n_logged,logwandb=True, display_frame_nr=True):
    """Builds a video grid for control-input models: input-image row,
    predicted video, ground-truth video and the per-frame flow-vector row.

    :param src: source images, (B, 3, H, W) in [0, 1]
    :param traj: control trajectories, (B, T, 2, H, W)
    :param pred: predicted videos, (B, T, 3, H, W) in [0, 1]
    :param tgt: ground-truth videos, (B, T, 3, H, W) in [0, 1]
    :param n_logged: number of logged examples
    :param logwandb: if True, move channels for wandb video logging (T, C, H, W)
    :param display_frame_nr: overlay the frame number on each video row
    :return: uint8 video grid
    """
    seq_len = tgt.shape[1]
    srcs = np.concatenate([s for s in ((src.permute(0, 2, 3, 1).cpu().numpy()) * 255.).astype(np.uint8)[:n_logged]],axis=1)
    # Render the control arrows per time step to get an animated row.
    traj_vis = []
    for t in range(traj.shape[1]):
        arrows = draw_arrow(traj[:n_logged,t].cpu().numpy())
        traj_vis.append(arrows)
    traj_vis = np.stack(traj_vis,axis=0)
    traj_vis = put_text_to_video_row(traj_vis, "Flow Vectors", display_frame_nr=display_frame_nr)
    srcs = cv2.UMat.get(cv2.putText(cv2.UMat(srcs), f"Sequence length {seq_len}", (int(srcs.shape[1] // 3), int(srcs.shape[0] / 6)), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(srcs.shape[0] / 256), (255, 0, 0), int(srcs.shape[0] / 128)))
    # The static source row is repeated for every frame of the sequence.
    srcs = np.stack([srcs] * seq_len, axis=0)
    srcs = put_text_to_video_row(srcs, "Input Image", display_frame_nr=display_frame_nr)
    pred = ((pred.permute(0, 1, 3, 4, 2).cpu().numpy()) * 255).astype(np.uint8)[:n_logged]
    pred = np.concatenate(list(pred), axis=2)
    pred = put_text_to_video_row(pred, "Predicted Video", display_frame_nr=display_frame_nr)
    tgt = ((tgt.permute(0, 1, 3, 4, 2).cpu().numpy()) * 255).astype(np.uint8)[:n_logged]
    tgt = np.concatenate(list(tgt), axis=2)
    tgt = put_text_to_video_row(tgt, "Groundtruth Video", display_frame_nr=display_frame_nr)
    full = np.concatenate([srcs, pred, tgt, traj_vis], axis=1)
    if logwandb:
        full = np.moveaxis(full, [0, 1, 2, 3], [0, 2, 3, 1])
    return full
def make_video(src,poke,pred,tgt,n_logged,flow=None,length_divisor=5,logwandb=True,flow_weights= None, display_frame_nr=False,invert_poke = False):
    """
    Builds the main logging video grid: annotated source row, predicted and
    ground-truth videos, and one row per flow visualization (poke, optional
    complete flow, optional flow weights).

    :param src: src image
    :param poke: poke, also input to the network
    :param pred: predicted video of the network
    :param tgt: target video the network was trained to reconstruct
    :param n_logged: number of logged examples
    :param flow: src flow from which the poke is originating
    :param length_divisor: divisor for the length of the arrow, that's drawn to visualize the mean direction of the flow within the poke patch
    :param logwandb: whether the output video grid is intended to be logged with wandb or not (in this case the grid channels have to be changed)
    :param flow_weights: Optional weights for the flow which are also displayed if they are not None.
    :param display_frame_nr: overlay the frame number on each video row
    :param invert_poke: if True, the second half of the sequence shows the
        poke arrow inverted (drawn in green)
    :return: uint8 video grid, (T, H', W', 3), or (T, 3, H', W') for wandb
    """
    seq_len = tgt.shape[1]
    src = ((src.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pokes = vis_flow(poke[:n_logged])
    flows_vis = None
    # --- optional row: complete flow with the poke region marked by a rect ---
    if flow is not None:
        flows = vis_flow(flow[:n_logged])
        flows_with_rect = []
        for i,(poke_p,flow) in enumerate(zip(pokes,flows)):
            poke_points = np.nonzero(poke_p.any(-1) > 0)
            if poke_points[0].size == 0:
                flows_with_rect.append(np.zeros_like(flow))
            else:
                min_y = np.amin(poke_points[0])
                max_y = np.amax(poke_points[0])
                min_x = np.amin(poke_points[1])
                max_x = np.amax(poke_points[1])
                # draw rect
                flow_with_rect = cv2.rectangle(flow,(min_x,min_y),(max_x,max_y),(255,255,255),max(1,int(flow.shape[0]//64)))
                # flow_with_rect = cv2.UMat.get(cv2.putText(cv2.UMat(flow_with_rect), f"Flow Complete",(int(flow_with_rect.shape[1] // 3), int(5 * flow_with_rect.shape[0] / 6) ), cv2.FONT_HERSHEY_SIMPLEX,
                #                                 float(flow_with_rect.shape[0] / 256), (255, 255, 255), int(flow_with_rect.shape[0] / 128)))
                flows_with_rect.append(flow_with_rect)
        flow_cat = np.concatenate(flows_with_rect,axis=1)
        flows_vis= [np.stack([flow_cat]*seq_len,axis=0)]
        flows_vis[0] = put_text_to_video_row(flows_vis[0], "Flow Complete", color=(255, 255, 255))
    # --- optional row: flow weights rendered as a heat map ---
    if flow_weights is not None:
        flow_weights = flow_weights.cpu().numpy()
        heatmaps = []
        for i, weight in enumerate(flow_weights):
            weight_map = ((weight - weight.min()) / weight.max() * 255.).astype(np.uint8)
            heatmap = cv2.applyColorMap(weight_map, cv2.COLORMAP_HOT)
            heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2BGR)
            heatmaps.append(heatmap)
        heatmaps = np.concatenate(heatmaps, axis=1)
        heatmaps = np.stack([heatmaps]*seq_len, axis=0)
        heatmaps = put_text_to_video_row(heatmaps, "Flow Weights", color=(255,255,255))
        if flows_vis is None:
            flows_vis = [heatmaps]
        else:
            flows_vis.insert(0,heatmaps)
    # --- draw the mean poke direction as an arrow on pokes and sources ---
    srcs_with_arrow = []
    pokes_with_arrow = []
    if invert_poke:
        srcs_with_arrow_inv = []
        pokes_with_arrow_inv = []
    eps = 1e-6
    for i, (poke_p,src_i) in enumerate(zip(poke[:n_logged],src)):
        poke_points = np.nonzero(pokes[i].any(-1) > 0)
        if poke_points[0].size==0:
            pokes_with_arrow.append(np.zeros_like(pokes[i]))
            srcs_with_arrow.append(src_i)
        else:
            min_y = np.amin(poke_points[0])
            max_y = np.amax(poke_points[0])
            min_x = np.amin(poke_points[1])
            max_x = np.amax(poke_points[1])
            # plot mean direction of flow in poke region
            avg_flow = np.mean(poke_p[:, min_y:max_y, min_x:max_x].cpu().numpy(), axis=(1, 2))
            arrow_dir = avg_flow / (np.linalg.norm(avg_flow) + eps) * (poke_p.shape[1] / length_divisor)
            if not math.isnan(arrow_dir[0]) or not math.isnan(arrow_dir[1]):
                arrow_start = (int((min_x + max_x) / 2), int((min_y + max_y) / 2))
                arrow_end = (arrow_start[0] + int(arrow_dir[0]), arrow_start[1] + int(arrow_dir[1]))
                test = pokes[i]
                # test = cv2.UMat.get(cv2.putText(cv2.UMat(test), f"Poke", (int(test.shape[1] // 3), int(5 * test .shape[0] / 6)), cv2.FONT_HERSHEY_SIMPLEX,
                #                                 float(test.shape[0] / 256), (255, 255, 255), int(test.shape[0] / 128)))
                pokes_with_arrow.append(cv2.arrowedLine(test, arrow_start, arrow_end, (255, 0, 0), max(int(src_i.shape[0] / 64),1)))
                srcs_with_arrow.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(src_i), arrow_start, arrow_end, (255, 0, 0), max(int(src_i.shape[0] / 64),1))))
                if invert_poke:
                    # Inverted arrow (green) for the second half of the sequence.
                    arrow_end_inv = (arrow_start[0] - int(arrow_dir[0]), arrow_start[1] - int(arrow_dir[1]))
                    pokes_with_arrow_inv.append(cv2.arrowedLine(test, arrow_start, arrow_end_inv, (0, 255, 0), max(int(src_i.shape[0] / 64), 1)))
                    srcs_with_arrow_inv.append(cv2.UMat.get(cv2.arrowedLine(cv2.UMat(src_i), arrow_start, arrow_end, (0, 255, 0), max(int(src_i.shape[0] / 64), 1))))
            else:
                pokes_with_arrow.append(np.zeros_like(pokes[i]))
                srcs_with_arrow.append(src_i)
    poke = np.concatenate(pokes_with_arrow, axis=1)
    if invert_poke:
        # First half of the frames shows the normal poke, second half the inverted one.
        poke_inv = np.concatenate(pokes_with_arrow_inv, axis=1)
        poke = put_text_to_video_row(np.stack([*[poke] * int(math.ceil(float(seq_len)/2)),*[poke_inv]*int(seq_len/2)], axis=0),"Pokes",color=(255,255,255))
    else:
        poke = put_text_to_video_row(np.stack([poke] * seq_len, axis=0),"Poke",color=(255,255,255))
    if flows_vis is None:
        flows_vis = [poke]
    else:
        flows_vis.append(poke)
    # --- assemble source row and video rows ---
    srcs = np.concatenate(srcs_with_arrow,axis=1)
    srcs = cv2.UMat.get(cv2.putText(cv2.UMat(srcs), f"Sequence length {seq_len}", (int(srcs.shape[1] // 3), int(srcs.shape[0]/6)), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(srcs.shape[0] / 256), (255, 0, 0), int(srcs.shape[0] / 128)))
    if invert_poke:
        srcs_inv = np.concatenate(srcs_with_arrow_inv, axis=1)
        srcs_inv = cv2.UMat.get(cv2.putText(cv2.UMat(srcs_inv), f"Sequence length {seq_len}", (int(srcs_inv.shape[1] // 3), int(srcs_inv.shape[0] / 6)), cv2.FONT_HERSHEY_SIMPLEX,
                                            float(srcs_inv.shape[0] / 256), (255, 0, 0), int(srcs_inv.shape[0] / 128)))
        srcs = np.stack([*[srcs] * int(math.ceil(float(seq_len)/2)),*[srcs_inv]*int(seq_len/2)],axis=0)
    else:
        srcs = np.stack([srcs]*seq_len,axis=0)
    srcs = put_text_to_video_row(srcs,"Input Image",display_frame_nr=display_frame_nr)
    pred = ((pred.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pred = np.concatenate(list(pred),axis=2)
    pred = put_text_to_video_row(pred, "Predicted Video",display_frame_nr=display_frame_nr)
    tgt = ((tgt.permute(0, 1, 3, 4, 2).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    tgt = np.concatenate(list(tgt),axis=2)
    tgt = put_text_to_video_row(tgt,"Groundtruth Video",display_frame_nr=display_frame_nr)
    full = np.concatenate([srcs,pred,tgt,*flows_vis],axis=1)
    if logwandb:
        # wandb expects (T, C, H, W) video tensors.
        full = np.moveaxis(full,[0,1,2,3],[0,2,3,1])
    return full
def put_text_to_video_row(video_row,text, color = None,display_frame_nr=False):
    """Overlays ``text`` (and optionally the 1-based frame number) on every
    frame of a video row; returns the annotated frames stacked along axis 0."""
    chosen_color = (255, 0, 0) if color is None else color
    annotated = []
    for idx, frame in enumerate(video_row):
        # Font scale/thickness/position all derive from the frame height so
        # the overlay looks similar across resolutions.
        scale = float(frame.shape[0] / 256)
        thickness = int(frame.shape[0] / 128)
        y_pos = frame.shape[0] - int(frame.shape[0] / 6)
        out = cv2.UMat.get(cv2.putText(cv2.UMat(frame), text, (int(frame.shape[1] // 3), y_pos), cv2.FONT_HERSHEY_SIMPLEX,
                                       scale, chosen_color, thickness))
        if display_frame_nr:
            out = cv2.UMat.get(cv2.putText(cv2.UMat(out), str(idx + 1), (int(frame.shape[1] / 32), y_pos), cv2.FONT_HERSHEY_SIMPLEX,
                                           scale, chosen_color, thickness))
        annotated.append(out)
    return np.stack(annotated)
def make_animated_grid(src, poke, pred, tgt, n_logged, flow=None, length_divisor=5,logwandb=True):
    """Build an animated comparison grid (source / prediction / target rows plus
    flow visualizations) suitable for wandb.Video.

    :param src: source images, (N, C, H, W) in [-1, 1].
    :param poke: poke flow fields, (N, 2, H, W) torch tensors.
    :param pred: predicted images, (N, C, H, W) in [-1, 1].
    :param tgt: ground-truth images, (N, C, H, W) in [-1, 1].
    :param n_logged: number of examples included in the grid.
    :param flow: optional full flow fields; if given, an extra row with the poke
        region drawn as a rectangle is prepended.
    :param length_divisor: controls arrow length (frame width / length_divisor).
    :param logwandb: if True, axes are moved to (time, channels, height, width)
        as required by wandb.Video.
    :return: np.ndarray grid; 3 animation frames (src / pred / tgt) stacked on axis 0.
    """
    # visualize flows
    pokes = vis_flow(poke[:n_logged])
    pokes_with_arrow = []
    for i,poke_p in enumerate(poke[:n_logged]):
        # bounding box of the non-zero poke region in the rendered flow image
        poke_points = np.nonzero(pokes[i].any(-1) > 0)
        min_y = np.amin(poke_points[0])
        max_y = np.amax(poke_points[0])
        min_x = np.amin(poke_points[1])
        max_x = np.amax(poke_points[1])
        # plot mean direction of flow in poke region
        avg_flow = np.mean(poke_p[:,min_y:max_y,min_x:max_x].cpu().numpy(), axis=(1, 2))
        arrow_dir = avg_flow / np.linalg.norm(avg_flow) * (poke_p.shape[1] / length_divisor)
        arrow_start = (int((min_x+max_x)/2),int((min_y+max_y)/2))
        arrow_end = (arrow_start[0]+int(arrow_dir[0]),arrow_start[1]+int(arrow_dir[1]))
        test = pokes[i]
        pokes_with_arrow.append(cv2.arrowedLine(test,arrow_start,arrow_end,(0,0,255),2))
    poke = np.concatenate(pokes_with_arrow, axis=1)
    # each flow row is repeated 3x so it animates alongside the 3 image frames
    flows_vis = [np.stack([poke]*3,axis=0)]
    if flow is not None:
        flows = vis_flow(flow[:n_logged])
        flows_with_rect = []
        # NOTE: the loop variable `flow` shadows the function argument here;
        # harmless since the argument is no longer needed afterwards
        for i,(poke_p,flow) in enumerate(zip(pokes,flows)):
            poke_points = np.nonzero(poke_p.any(-1) > 0)
            min_y = np.amin(poke_points[0])
            max_y = np.amax(poke_points[0])
            min_x = np.amin(poke_points[1])
            max_x = np.amax(poke_points[1])
            # draw rect
            flow_with_rect = cv2.rectangle(flow,(min_x,min_y),(max_x,max_y),(255,255,255),max(1,int(flow.shape[0]//64)))
            flows_with_rect.append(flow_with_rect)
        flow_cat = np.concatenate(flows_with_rect,axis=1)
        flows_vis.insert(0,np.stack([flow_cat]*3,axis=0))
    # visualize images: map [-1, 1] tensors to channel-last uint8 and
    # concatenate the logged examples horizontally
    src = ((src.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pred = ((pred.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    tgt = ((tgt.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    src = np.concatenate(list(src),axis=1)
    pred = np.concatenate(list(pred), axis=1)
    tgt = np.concatenate(list(tgt), axis=1)
    # NOTE(review): the x text position uses shape[0] (height) where other
    # helpers in this file use shape[1] (width) — possibly unintended; confirm
    src = cv2.UMat.get(cv2.putText(cv2.UMat(src), "Source", (int(src.shape[0] // 4), 30), cv2.FONT_HERSHEY_SIMPLEX,
                                   float(src.shape[0] / 256), (0, 0, 0), int(src.shape[0] / 128)))
    pred = cv2.UMat.get(cv2.putText(cv2.UMat(pred), "Predicted", (int(pred.shape[0] // 4), 30), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(pred.shape[0] / 256), (0, 0, 0), int(pred.shape[0] / 128)))
    tgt = cv2.UMat.get(cv2.putText(cv2.UMat(tgt), "Target", (int(tgt.shape[0] // 4), 30), cv2.FONT_HERSHEY_SIMPLEX,
                                   float(tgt.shape[0] / 256), (0, 0, 0), int(tgt.shape[0] / 128)))
    animation = np.stack([src,pred,tgt],axis=0)
    # this generates a video grid which can be used by wandb.Video()
    full = np.concatenate([animation,*flows_vis],axis=1)
    # wandb requires video to have shape (time, channels, height, width)
    if logwandb:
        full = np.moveaxis(full,[0,1,2,3],[0,2,3,1])
    return full
def make_generic_grid(data, dtype, n_logged):
    """Assemble a grid image from heterogeneous batches (flows, images, diff maps).

    :param data: list of batches; entry i is interpreted according to dtype[i].
        For the "diff_*" types the entry is a (generated, target) pair.
    :param dtype: list of type tags, one per entry of data. Supported:
        "flow", "flow_3D", "img", "diff_flow_amplitude", "diff_flow_direction",
        "diff_flow_clipped", "diff_scaled".
    :param n_logged: number of examples per batch included in the grid.
    :return: uint8 grid image with one row per entry of data.
    """
    from utils.visualizer import FlowVisualizer
    visualizer = FlowVisualizer()
    final_data = []
    assert(len(data)==len(dtype))
    for i, batch in enumerate(data):
        # NOTE(review): if dtype[i] matches none of the branches below, `image`
        # stays bound to the previous iteration's value (or is unbound on the
        # first iteration, raising NameError) — confirm all tags are covered
        if dtype[i] == "flow":
            # min/max-normalize the 2-channel flow and append a zero channel
            add = batch.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
            add -= add.min()
            add /= add.max()
            add = (add * 255.0).astype(np.uint8)
            image = np.concatenate(
                [add, np.expand_dims(np.zeros_like(add).sum(-1), axis=-1)], axis=-1).astype(np.uint8)
        elif dtype[i] == "flow_3D":
            # 3-channel flow: only min/max-normalize
            add = batch.permute(0, 2, 3, 1).cpu().numpy()[:n_logged]
            add -= add.min()
            add /= add.max()
            add = (add * 255.0).astype(np.uint8)
            image = add
        elif dtype[i] == "img":
            # images assumed in [-1, 1] -> uint8
            image = ((batch.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
        elif dtype[i] == "diff_flow_amplitude":
            generated = batch[0][:n_logged].detach().cpu()
            target = batch[1][:n_logged].detach().cpu()
            image = visualizer.create_diff_amplitude(
                visualizer.make_3d_to_2d(generated), visualizer.make_3d_to_2d(target))
            # grayscale diff replicated to 3 channels, channel-last
            image = (image*255).astype(np.uint8)[:, None].repeat(3, axis=1).transpose(0, 2, 3, 1)
        elif dtype[i] == "diff_flow_direction":
            generated = batch[0][:n_logged].detach().cpu()
            target = batch[1][:n_logged].detach().cpu()
            image = visualizer.create_diff_direction(
                visualizer.make_3d_to_2d(generated), visualizer.make_3d_to_2d(target))
            image = (image * 255).astype(np.uint8)[:, None].repeat(3, axis=1).transpose(0, 2, 3, 1)
        elif dtype[i] == "diff_flow_clipped":
            generated = batch[0][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            target = batch[1][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            image = np.sum(np.abs(generated-target), axis=-1)
            # NOTE(review): the cast to uint8 happens *before* np.clip, so the
            # clip is a no-op and values > 255 wrap around — verify intent
            image = (image[:, :, :, None]).astype(np.uint8)
            image = np.clip(image, 0, 255)
            image = np.repeat(image, 3, axis=-1)
        elif dtype[i] == "diff_scaled":
            generated = batch[0][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            target = batch[1][:n_logged].permute(0, 2, 3, 1).cpu().numpy()
            image = np.sum(np.abs(generated-target), axis=-1)
            # NOTE(review): max(axis=0) normalizes each pixel position by its
            # maximum across the batch (not per sample) — confirm intent
            image /= image.max(axis=0)
            image = (image[:, :, :, None]*255.0).astype(np.uint8)
            image = np.repeat(image, 3, axis=-1)
        # concatenate the logged samples horizontally into one row
        image = np.concatenate([s for s in image], axis=1)
        final_data.append(image)
    # stack the rows vertically
    grid = np.concatenate(final_data, axis=0)
    return grid
def make_img_grid(appearance, shape, pred, tgt= None, n_logged=4, target_label="Target Images",
                  label_app = "Appearance Images", label_gen = "Generated Images", label_shape = "Shape Images"):
    """
    Build a labelled image grid with one row each for appearance, shape and
    generated images, plus an optional target row.

    All image tensors are expected in (N, C, H, W) layout with values in
    [-1, 1]; they are converted to channel-last uint8 and the first
    ``n_logged`` examples are concatenated horizontally per row. Each row gets
    its label drawn near the bottom with cv2.putText.

    :param appearance: batch of appearance-conditioning images.
    :param shape: batch of shape-conditioning images.
    :param pred: batch of generated images.
    :param tgt: optional batch of ground-truth target images.
    :param n_logged: number of examples shown per row.
    :param target_label: caption for the target row.
    :param label_app: caption for the appearance row.
    :param label_gen: caption for the generated row.
    :param label_shape: caption for the shape row.
    :return: uint8 grid image with the rows stacked vertically.
    """
    appearance = ((appearance.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    shape = ((shape.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(np.uint8)[:n_logged]
    pred = ((pred.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(
        np.uint8)[:n_logged]
    if tgt is not None:
        tgt = ((tgt.permute(0, 2, 3, 1).cpu().numpy() + 1.) * 127.5).astype(
            np.uint8)[:n_logged]
        tgt = np.concatenate([t for t in tgt], axis=1)
        tgt = cv2.UMat.get(cv2.putText(cv2.UMat(tgt), target_label , (int(tgt.shape[1] // 3), tgt.shape[0] - int(tgt.shape[0]/6)), cv2.FONT_HERSHEY_SIMPLEX,
                                       float(tgt.shape[0] / 256), (255, 0, 0), int(tgt.shape[0] / 128)))
    appearance = np.concatenate([s for s in appearance], axis=1)
    appearance = cv2.UMat.get(cv2.putText(cv2.UMat(appearance),label_app, (int(appearance.shape[1] // 3), appearance.shape[0] - int(appearance.shape[0]/6)), cv2.FONT_HERSHEY_SIMPLEX,
                                          float(appearance.shape[0] / 256), (255, 0, 0), int(appearance.shape[0] / 128)))
    shape = np.concatenate([f for f in shape], axis=1)
    shape = cv2.UMat.get(cv2.putText(cv2.UMat(shape), label_shape, (int(shape.shape[1] // 3), shape.shape[0] - int(shape.shape[0]/6)), cv2.FONT_HERSHEY_SIMPLEX,
                                     float(shape.shape[0] / 256), (255, 0, 0), int(shape.shape[0] / 128)))
    pred = np.concatenate([p for p in pred], axis=1)
    pred = cv2.UMat.get(cv2.putText(cv2.UMat(pred), label_gen, (int(pred.shape[1] // 3), pred.shape[0] - int(pred.shape[0]/6)), cv2.FONT_HERSHEY_SIMPLEX,
                                    float(pred.shape[0] / 256), (255, 0, 0), int(pred.shape[0] / 128)))
    if tgt is None:
        grid = np.concatenate([appearance, shape, pred], axis=0)
    else:
        grid = np.concatenate([appearance, shape, pred, tgt], axis=0)
    return grid
def scale_img(img):
    """Map an image from value range [-1, 1] to integer pixel values in [0, 255].

    Works for both torch tensors and numpy arrays; the result is cast to the
    corresponding 8-bit unsigned integer dtype (truncating toward zero).

    :param img: image data in [-1, 1] (torch.Tensor or np.ndarray).
    :return: the rescaled image as uint8, same container type as the input.
    """
    rescaled = (img + 1.) * 127.5
    if isinstance(rescaled, torch.Tensor):
        return rescaled.to(torch.uint8)
    # otherwise assumed to be a numpy array
    return rescaled.astype(np.uint8)
def human_graph_cut_map(img, poke_size):
    """Segment the (human) foreground of ``img`` with GrabCut and return its pixel coordinates.

    The initialisation rectangle is a heuristic: the central 3/5 of the image
    width, with ``poke_size`` pixels cropped from top and bottom.

    :param img: BGR uint8 image of shape (H, W, 3).
    :param poke_size: margin in pixels excluded at the top/bottom of the init rect.
    :return: tuple (row_indices, col_indices) of foreground pixels, as from np.where.
    """
    import cv2
    # GrabCut state mask plus its initialisation rectangle (x, y, w, h)
    mask = np.zeros(img.shape[:2], np.uint8)
    rect = (int(img.shape[1] / 5),
            poke_size,
            int(3. * img.shape[1] / 5),
            int(img.shape[0] - 2 * poke_size))
    # fore-/background Gaussian-mixture-model buffers required by cv2.grabCut
    fgm = np.zeros((1, 65), dtype=np.float64)
    bgm = np.zeros((1, 65), dtype=np.float64)
    mask2, fgm, bgm = cv2.grabCut(img, mask, rect, fgm, bgm, 5, cv2.GC_INIT_WITH_RECT)
    # labels 0 (background) and 2 (probable background) are dropped, the rest is
    # foreground. BUGFIX: np.bool was removed in NumPy >= 1.24 — use builtin bool.
    mask3 = np.where((mask2 == 2) | (mask2 == 0), 0, 1).astype(bool)
    tuples = np.where(mask3[:, :])
    return tuples
    # NOTE: the original debug-plotting code that followed the return statement
    # was unreachable and has been removed.
# human_NN_map_weights = "/export/home/jsieber/poking/models/mask-rcnn-coco/frozen_inference_graph.pb"
# human_NN_map_classes = "/export/home/jsieber/poking/models/mask-rcnn-coco/object_detection_classes_coco.txt"
# human_NN_map_config = "/export/home/jsieber/poking/models/mask-rcnn-coco/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt"
# human_NN_map_LABELS = open(human_NN_map_classes).read().strip().split("\n")
#
# human_NN_map_net = cv2.dnn.readNetFromTensorflow(human_NN_map_weights, human_NN_map_config)
# def human_NN_map(frame, conf=0.5, threshold=0.3):
#
#
# # modified from https://www.pyimagesearch.com/2018/11/26/instance-segmentation-with-opencv/
# before_width = frame.shape[1]
# frame = imutils.resize(frame, width=600)
# (H, W) = frame.shape[:2]
#
# # construct a blob from the input image and then perform a
# # forward pass of the Mask R-CNN, giving us (1) the bounding
# # box coordinates of the objects in the image along with (2)
# # the pixel-wise segmentation for each specific object
# blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
# human_NN_map_net.setInput(blob)
# (boxes, masks) = human_NN_map_net.forward(["detection_out_final",
# "detection_masks"])
#
# # sort the indexes of the bounding boxes in by their corresponding
# # prediction probability (in descending order)
# idxs = np.argsort(boxes[0, 0, :, 2])[::-1]
#
# # initialize the mask, ROI, and coordinates of the person for the
# # current frame
# mask = None
# roi = None
# coords = None
#
# # loop over the indexes
# for i in idxs:
# # extract the class ID of the detection along with the
# # confidence (i.e., probability) associated with the
# # prediction
# classID = int(boxes[0, 0, i, 1])
# confidence = boxes[0, 0, i, 2]
#
# # if the detection is not the 'person' class, ignore it
# if human_NN_map_LABELS[classID] != "person":
# continue
# # filter out weak predictions by ensuring the detected
# # probability is greater than the minimum probability
# if confidence > conf:
# # scale the bounding box coordinates back relative to the
# # size of the image and then compute the width and the
# # height of the bounding box
# box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
# (startX, startY, endX, endY) = box.astype("int")
# coords = (startX, startY, endX, endY)
# boxW = endX - startX
# boxH = endY - startY
#
# # extract the pixel-wise segmentation for the object,
# # resize the mask such that it's the same dimensions of
# # the bounding box, and then finally threshold to create
# # a *binary* mask
# mask = masks[i, classID]
# mask = cv2.resize(mask, (boxW, boxH),
# interpolation=cv2.INTER_NEAREST)
# mask = (mask > threshold)
#
# # extract the ROI and break from the loop (since we make
# # the assumption there is only *one* person in the frame
# # who is also the person with the highest prediction
# # confidence)
# roi = frame[startY:endY, startX:endX][mask]
# break
#
# # initialize our output frame
# output = frame.copy()
#
# # if the mask is not None *and* we are in privacy mode, then we
# # know we can apply the mask and ROI to the output image
# if mask is not None:
# # blur the output frame
# output = np.zeros_like(output)
#
# # add the ROI to the output frame for only the masked region
# (startX, startY, endX, endY) = coords
# roi = np.ones_like(roi)*255
# output[startY:endY, startX:endX][mask] = roi
# output = imutils.resize(output, width=before_width)
#
# tuples = np.where(output[:, :, 0] > 0)
# return tuples
#
# new_img = np.zeros_like(output)
# for t in tuples:
# for i in range(3):
# new_img[t[0], t[1], i] = 255
# # show the output frame
# plt.imshow(new_img)
# plt.show()
def make_hist(hist, title, ylabel, xlabel="Frame number", bins_edges = None):
    """Log a histogram plot of ``hist`` to wandb as an image.

    :param hist: per-bin values (e.g. a metric per frame index).
    :param title: wandb log key for the resulting image.
    :param ylabel: y-axis label.
    :param xlabel: x-axis label; defaults to "Frame number".
    :param bins_edges: optional bin edges (must have len(hist) + 1 entries);
        defaults to the integer edges 0 .. len(hist).
    """
    plt.ioff()
    if bins_edges is None:
        # BUGFIX: np.float was removed in NumPy >= 1.24; builtin float is equivalent
        bins_edges = np.arange(0, len(hist) + 1).astype(float)
    else:
        assert len(list(bins_edges)) == len(list(hist)) + 1
    # place one weighted sample at each bin centre so plt.hist reproduces `hist`
    centroids = (bins_edges[1:] + bins_edges[:-1]) / 2
    hist_, bins_, _ = plt.hist(
        centroids,
        bins=len(hist),
        weights=np.asarray(hist),
        range=(min(bins_edges), max(bins_edges)),
    )
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    wandb.log({title: wandb.Image(plt)})
    plt.close()
def make_plot(x,y,title,ylabel,xlabel="frame idx", savename=None):
    """Draw a simple line plot of y over x and either log it to wandb or save it.

    :param x: x values.
    :param y: y values.
    :param title: plot title (also used as the wandb log key).
    :param ylabel: y-axis label.
    :param xlabel: x-axis label; defaults to "frame idx".
    :param savename: if given, the figure is written to this path instead of
        being logged to wandb.
    """
    plt.ioff()
    figure, axis = plt.subplots()
    # red triangle markers connected by a solid line
    axis.plot(x, y, 'rv-')
    axis.set(xlabel=xlabel, ylabel=ylabel, title=title)
    axis.grid()
    if savename is not None:
        figure.savefig(savename)
    else:
        wandb.log({title: wandb.Image(plt)})
    plt.close()
if __name__=="__main__":
    # quick manual benchmark of the segmentation helpers on one iPER frame
    frame_path = "/export/data/ablattma/Datasets/iPER/processed/001_10_1/frame_277.png"
    frame = cv2.imread(frame_path)
    frame = imutils.resize(frame, width=128)
    import time
    # time the GrabCut-based segmentation a few times
    for i in range(3):
        start_time = time.time()
        human_graph_cut_map(frame, 15)
        print("--- %s seconds ---" % (time.time() - start_time))
    # BUGFIX: the benchmark of the Mask-R-CNN variant was removed — its
    # definition (human_NN_map) is commented out above, so calling it here
    # raised a NameError at runtime.
| 33,806 | 43.424442 | 204 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/fvd_models.py | from utils.general import get_logger
import os
from os import path
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--base", type=str,
                        default="/export/scratch/ablattma/visual_poking/fixed_length_model/generated",
                        help="Source directory.")
    parser.add_argument("--gpu", type=int, required=True, help="The target device.")
    args = parser.parse_args()

    # model names are listed one per line; lines starting with '#' are disabled
    with open("config/model_names.txt", "r") as f:
        model_names = f.readlines()
    model_names = [m for m in model_names if not m.startswith("#")]

    logger = get_logger("eval-models")

    base_path = args.base
    if "DATAPATH" in os.environ:
        base_path = os.environ['DATAPATH'] + base_path
    logger.info(f'Base path is "{base_path}"')
    gpu = args.gpu

    for n in model_names:
        n = n.rstrip()
        logger.info(f"Compute fvd for model {n}")
        filepath = path.join(base_path, n, "samples_fvd")
        if not any(map(lambda x: x.endswith("npy"), os.listdir(filepath))):
            # BUGFIX: previously this branch only logged the message and then
            # ran the metric anyway; actually skip models without .npy samples
            logger.info("no samples were found...skipping")
            continue
        try:
            # run the fvd metric in a subprocess on the requested gpu
            test_cmd = f"python -m utils.metric_fvd --source {filepath} --gpu {gpu}"
            os.system(test_cmd)
        except Exception as e:
            logger.error(e)
            logger.info("next model")
            continue

    logger.info("finished")
| 1,429 | 25.481481 | 102 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/flownet_loader.py | import torch
from torch.nn import functional as F
from PIL import Image
from models.flownet2.models import *
from torchvision import transforms
import matplotlib.pyplot as plt
import argparse
from utils.general import get_gpu_id_with_lowest_memory
class FlownetPipeline:
    """Thin wrapper around FlowNet2: loads the pretrained network, prepares
    image pairs for it, and post-processes / visualizes the predicted flow."""

    def __init__(self):
        super(FlownetPipeline, self).__init__()

    def load_flownet(self, args, device):
        """
        Load the pretrained FlowNet2 checkpoint and move the model to ``device``.

        :param args: args from argparser (FlowNet2 reads e.g. rgb_max / fp16).
        :param device: target torch device.
        :return: The flownet pytorch model, in eval mode, on ``device``.
        """
        # load model savefile
        # NOTE(review): checkpoint path is hard-coded to a cluster location
        save = torch.load(
            "/export/scratch/compvis/datasets/plants/pretrained_models/FlowNet2_checkpoint.pth.tar")
        model = FlowNet2(args, batchNorm=False)
        untrained_statedict = model.state_dict()  # NOTE(review): unused
        # load it into proper clean model
        model.load_state_dict(save["state_dict"])
        model.eval()
        return model.to(device)

    def preprocess_image(self, img, img2, channelOrder="RGB",spatial_size= None):
        """ This preprocesses the images for FlowNet input. Preserves the height and width order!
        :param channelOrder: RGB(A) or BGR; RGB(A) inputs get their channels swapped to BGR.
        :param img: The first image in form of (W x H x RGBA) or (H x W x RGBA)
        :param img2: The second image in form of (W x H x RGBA) or (H x W x RGBA)
        :param spatial_size: optional target size for bilinear resizing before stacking.
        :return: The preprocessed input for the prediction (BGR x Img# x W x H) or (BGR x Img# x H x W)
        """
        # ToTensor transforms from (H x W x C) => (C x H x W)
        # also automatically casts into range [0, 1]
        if spatial_size is None:
            img, img2 = transforms.ToTensor()(img)[:3], transforms.ToTensor()(img2)[:3]
        else:
            ts = transforms.Compose([transforms.ToPILImage(), transforms.Resize(size=spatial_size,interpolation=Image.BILINEAR),transforms.ToTensor()])
            img, img2 = ts(img)[:3],ts(img2)[:3]
        if channelOrder == "RGB":
            # FlowNet2 expects BGR ordering, so swap the first three channels
            img, img2 = img[[2, 1, 0]], img2[[2, 1, 0]]
        # Cast to proper shape (Batch x BGR x #Img x H x W); spatial dims are
        # cropped down to multiples of 64 as required by the network
        s = img.shape
        img, img2 = img[:, :int(s[1] / 64) * 64, :int(s[2] / 64) * 64], \
                    img2[:, :int(s[1] / 64) * 64,:int(s[2] / 64) * 64]
        stacked = torch.cat([img[:, None], img2[:, None]], dim=1)
        return stacked

    def predict(self, model, stacked, spatial_size=None):
        """
        Run FlowNet2 on a preprocessed image pair and return the flow of the
        first batch element, optionally resized to ``spatial_size``.

        :param model: the FlowNet2 model as returned by load_flownet.
        :param stacked: The two input images. (Batch x BGR x Img# x H x W)
        :param spatial_size: optional square output size; the flow magnitudes
            are rescaled accordingly.
        :return: The flow result (2 x W x H)
        """
        # predict
        model.eval()
        prediction = model(stacked)
        out_size = float(prediction.shape[-1])
        if spatial_size is not None:
            prediction = F.interpolate(
                prediction.cpu(), size=(spatial_size,spatial_size), mode="bilinear"
            )
            # rescale to make it fit to new shape (not grave, if this is skipped as flow is normalized anyways later)
            prediction = prediction / (out_size / spatial_size)
        flow = prediction[0]
        return flow

    def show_results(self, prediction, with_ampl=False):
        """
        Visualize a predicted flow field with matplotlib.

        :param prediction: The predicted flow (2 x W x H) tensor.
        :param with_ampl: if True, show the squared flow magnitude as a
            grayscale image; otherwise show the min/max-normalized flow with a
            zero-padded third channel as an RGB image.
        :return: plots (shows via plt.imshow; caller is expected to call plt.show()).
        """
        zeros = torch.zeros((1, prediction.shape[1], prediction.shape[2]))
        if with_ampl:
            ampl = torch.sum(prediction * prediction, dim=0)
            ampl = ampl.squeeze()
        else:
            ampl = torch.cat([prediction, zeros], dim=0)
        ampl -= ampl.min()
        ampl /= ampl.max()
        # show image
        im = transforms.ToPILImage()(ampl)
        if with_ampl:
            plt.imshow(im, cmap='gray')
        else:
            plt.imshow(im)
if __name__ == "__main__":
    # manual smoke test: compute and display the flow between two plant frames
    # parse args
    parser = argparse.ArgumentParser(description='Process some integers.')
    # always 1.0, because pytorch toTensor automatically converts into range [0.0, 1.0]
    parser.add_argument("--rgb_max", type=float, default=1.)
    parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
    parser.add_argument('--fp16_scale', type=float, default=1024.,
                        help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
    args = parser.parse_args()

    # load test images in BGR mode
    # NOTE(review): paths are hard-coded to a cluster location
    img, img2 = np.asarray(Image.open(f"/export/data/ablattma/Datasets/plants/processed/hoch_misc1/frame_0.png")), \
                np.asarray(Image.open(f"/export/data/ablattma/Datasets/plants/processed/hoch_misc1/frame_100.png"))
    # load Flownet
    pipeline = FlownetPipeline()
    flownet_device = get_gpu_id_with_lowest_memory()
    flownet = pipeline.load_flownet(args, flownet_device)
    # process to show flow
    stacked = pipeline.preprocess_image(img, img2).to(flownet_device)
    prediction = pipeline.predict(flownet, stacked[None]).cpu()
    pipeline.show_results(prediction)
    plt.show()
plt.show() | 4,882 | 37.753968 | 151 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/metrics.py | import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import inception_v3
import numpy as np
from scipy import linalg
from skimage.metrics import peak_signal_noise_ratio as compare_psnr
from skimage.metrics import structural_similarity as ssim
from pytorch_lightning.metrics import functional as PF
from tqdm import tqdm
class FIDInceptionModel(nn.Module):
    """Inception-v3 feature extractor for FID computation.

    Images are resized to 299x299, normalized with the standard ImageNet
    statistics, and passed through inception_v3 up to (and including) the
    Mixed_7c block; the pooled 2048-d activations are returned.
    """

    def __init__(self, normalize_range=True):
        # normalize_range: if True, inputs are expected in [-1, 1];
        # otherwise as uint8-style values in [0, 255]
        super().__init__()
        self.v3 = inception_v3(pretrained=True,aux_logits=False)
        # self.v3.aux_logits = False
        # ImageNet channel mean/std registered as buffers so they follow the
        # module across devices; shaped (1, C, 1, 1) for broadcasting
        self.register_buffer(
            "mean",
            torch.tensor([0.485, 0.456, 0.406], dtype=torch.float)
            .unsqueeze(dim=0)
            .unsqueeze(dim=-1)
            .unsqueeze(dim=-1),
        )
        self.register_buffer(
            "std",
            torch.tensor([0.229, 0.224, 0.225], dtype=torch.float)
            .unsqueeze(dim=0)
            .unsqueeze(dim=-1)
            .unsqueeze(dim=-1),
        )
        # inception_v3 expects 299x299 inputs
        self.resize = nn.Upsample(size=(299,299),mode="bilinear")
        self.normalize_range = normalize_range

    def forward(self, x):
        """Return pooled Inception features for a batch of images (N, 3, H, W)."""
        x = self.resize(x)
        if self.normalize_range:
            # normalize in between 0 and 1
            x = (x + 1.) / 2.
        else:
            x = x.to(torch.float) / 255.
        # normalize to demanded values
        x = (x - self.mean) / self.std
        # this reimpleents the respective layers of the inception model, see model definition
        for name, submodule in self.v3._modules.items():
            x = submodule(x)
            if name == "Mixed_7c":
                # stop before the classifier head; Mixed_7c activations are the features
                break
            elif name == "Conv2d_4a_3x3" or name == "Conv2d_2b_3x3":
                # pooling layers that torchvision applies in its functional
                # forward but which are not part of _modules
                x = F.avg_pool2d(x, kernel_size=3, stride=2)
        out = F.adaptive_avg_pool2d(x, (1, 1))
        out = torch.flatten(out, 1)
        return out
def metrcis_MAE(tensor1, tensor2, mean=True):
    """Sum of absolute element-wise differences between two tensors.

    :param tensor1: first tensor.
    :param tensor2: second tensor (same shape).
    :param mean: if True, divide by the number of elements (mean absolute error).
    :return: the metric as a numpy scalar.
    """
    total = torch.sum(torch.abs(tensor1 - tensor2))
    if mean:
        n_elements = tensor1.view((-1)).shape[0]
        total = total / n_elements
    return total.data.cpu().numpy()
def metrcis_MSE(tensor1, tensor2, mean=True):
    """Sum of squared element-wise differences between two tensors.

    :param tensor1: first tensor.
    :param tensor2: second tensor (same shape).
    :param mean: if True, divide by the number of elements (mean squared error).
    :return: the metric as a numpy scalar.
    """
    residual = tensor1 - tensor2
    squared_error = torch.sum(residual * residual)
    if mean:
        squared_error = squared_error / tensor1.view((-1)).shape[0]
    return squared_error.data.cpu().numpy()
def metrcis_l1(tensor1, tensor2, mean=False):
    """L1 distance between two tensors; alias for :func:`metrcis_MAE` with mean off by default."""
    return metrcis_MAE(tensor1, tensor2, mean=mean)
def metrcis_l2(tensor1, tensor2, mean=False):
    """Euclidean (L2) distance between two tensors.

    :param tensor1: first tensor.
    :param tensor2: second tensor (same shape).
    :param mean: if True, divide the distance by the number of elements.
    :return: the metric as a numpy scalar.
    """
    delta = tensor1 - tensor2
    distance = torch.sqrt(torch.sum(delta * delta))
    if mean:
        distance = distance / tensor1.view((-1)).shape[0]
    return distance.data.cpu().numpy()
def metric_ssim(real, fake, reduce = True, return_per_frame=False):
    """Structural similarity (skimage) between real and fake videos/images.

    Inputs are torch tensors in [-1, 1]; 3-D inputs (C, H, W) and 4-D inputs
    (T, C, H, W) are promoted to 5-D (B, T, C, H, W) before evaluation.

    :param real: ground-truth tensor.
    :param fake: generated tensor (same shape).
    :param reduce: if True, return the mean over all frames.
    :param return_per_frame: if True, additionally return a dict
        {frame index -> ssim value}.
    :return: ssim value(s); a (value, per-frame dict) tuple when
        return_per_frame is True.
    """
    if real.dim() == 3:
        real = real[None,None]
        fake = fake[None,None]
    elif real.dim() == 4:
        real = real[None]
        fake = fake[None]
    # rescale to valid range [0, 1] and make channel-last for skimage
    real = ((real + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    fake = ((fake + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    ssim_batch = np.asarray([ssim(rimg, fimg, multichannel=True, data_range=1.0,
                                  gaussian_weights=True,use_sample_covariance=False, ) for rimg, fimg in zip(real.reshape(-1,*real.shape[2:]),
                                                                                                             fake.reshape(-1,*fake.shape[2:]))])
    if return_per_frame:
        ssim_per_frame = {}
        for i in range(real.shape[1]):
            # NOTE(review): real[:, i] keeps the batch axis, so a (B, H, W, C)
            # stack is handed to skimage's ssim, which treats the leading axis
            # as a spatial dimension — confirm this is intended for B > 1
            real_test = real[:,i]
            fake_test = fake[:,i]
            ssim_per_frame[i] = np.asarray([ssim(real_test, fake_test,
                                                 multichannel=True, data_range=1., gaussian_weights=True, use_sample_covariance=False)])
        # ssim_per_frame = {i:np.asarray([ssim(real[:,i], fake[:,i],
        # multichannel=True, data_range=1., gaussian_weights=True, use_sample_covariance=False)]) for i in range(real.shape[1])}
    if reduce:
        if return_per_frame:
            ssim_pf_reduced = {key: ssim_per_frame[key] for key in ssim_per_frame}
            return np.mean(ssim_batch), ssim_pf_reduced
        else:
            return np.mean(ssim_batch)
    if return_per_frame:
        return ssim_batch, ssim_per_frame
    else:
        return ssim_batch
def ssim_lightning(real, fake, return_per_frame=False, normalize_range=True):
    """SSIM via pytorch_lightning.metrics between real and fake videos/images.

    3-D inputs (C, H, W) and 4-D inputs (T, C, H, W) are promoted to 5-D
    (B, T, C, H, W) before evaluation.

    :param real: ground-truth tensor.
    :param fake: generated tensor (same shape).
    :param return_per_frame: if True, additionally return {frame index -> ssim}.
    :param normalize_range: if True, inputs are assumed in [-1, 1] and are
        mapped to [0, 1] first.
    :return: scalar ssim, or (ssim, per-frame dict) when return_per_frame.
    """
    if real.dim() == 3:
        real = real[None, None]
        fake = fake[None, None]
    elif real.dim() == 4:
        real = real[None]
        fake = fake[None]
    if normalize_range:
        real = (real + 1.) /2.
        fake = (fake + 1.) / 2.
    # flatten batch and time axes so all frames are scored in one call
    ssim_batch = PF.ssim(fake.reshape(-1,*fake.shape[2:]),real.reshape(-1,*real.shape[2:])).cpu().numpy()
    if return_per_frame:
        ssim_per_frame = {i: PF.ssim(fake[:,i],real[:,i]).cpu().numpy() for i in range(real.shape[1])}
        return ssim_batch, ssim_per_frame
    return ssim_batch
def psnr_lightning(real, fake, return_per_frame=False, normalize_range=True):
    """PSNR via pytorch_lightning.metrics between real and fake videos/images.

    3-D inputs (C, H, W) and 4-D inputs (T, C, H, W) are promoted to 5-D
    (B, T, C, H, W) before evaluation.

    :param real: ground-truth tensor.
    :param fake: generated tensor (same shape).
    :param return_per_frame: if True, additionally return {frame index -> psnr}.
    :param normalize_range: if True, inputs are assumed in [-1, 1] and are
        mapped to [0, 1] first.
    :return: scalar psnr, or (psnr, per-frame dict) when return_per_frame.
    """
    if real.dim() == 3:
        real = real[None, None]
        fake = fake[None, None]
    elif real.dim() == 4:
        real = real[None]
        fake = fake[None]
    if normalize_range:
        real = (real + 1.) / 2.
        fake = (fake + 1.) / 2.
    # flatten batch and time axes so all frames are scored in one call
    psnr_batch = PF.psnr(fake.reshape(-1, *fake.shape[2:]), real.reshape(-1, *real.shape[2:])).cpu().numpy()
    if return_per_frame:
        # .contiguous() because the per-frame slices are not contiguous in memory
        psnr_per_frame = {i: PF.psnr(fake[:, i].contiguous(), real[:, i].contiguous()).cpu().numpy() for i in range(real.shape[1])}
        return psnr_batch, psnr_per_frame
    return psnr_batch
def metric_psnr(im_true, im_test,reduce = True, return_per_frame=False):
    """Peak signal-to-noise ratio (skimage) between true and test videos/images.

    Inputs are torch tensors in [-1, 1]; 3-D (C, H, W) and 4-D (T, C, H, W)
    inputs are promoted to 5-D (B, T, C, H, W) before evaluation.

    :param im_true: ground-truth tensor.
    :param im_test: generated tensor (same shape).
    :param reduce: if True, return the mean over all frames.
    :param return_per_frame: if True, additionally return {frame index -> psnr}.
    :return: psnr value(s); a (value, per-frame dict) tuple when
        return_per_frame is True.
    """
    if im_true.dim() == 3:
        im_true, im_test = im_true[None,None], im_test[None,None]
    elif im_true.dim() == 4:
        im_true, im_test = im_true[None], im_test[None]
    # make channel last and rescale to [0, 1] for skimage
    real = ((im_true + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    fake = ((im_test + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    psnr_batch = np.asarray([compare_psnr(r,f, data_range=1.) for r, f in zip(real.reshape(-1,*real.shape[2:]),fake.reshape(-1,*fake.shape[2:]))])
    if return_per_frame:
        # NOTE(review): real[:, i] keeps the batch axis, so the whole batch of
        # frame i is scored as one array — confirm this is intended for B > 1
        psnr_per_frame = {i: np.asarray([compare_psnr(real[:,i], fake[:,i], data_range=1.)]) for i in range(real.shape[1])}
    if reduce:
        if return_per_frame:
            psnr_pf_reduced = {key: psnr_per_frame[key] for key in psnr_per_frame}
            return np.mean(psnr_batch), psnr_pf_reduced
        else:
            return np.mean(psnr_batch)
    if return_per_frame:
        return psnr_batch, psnr_per_frame
    else:
        return psnr_batch
def metric_lpips(real, fake, lpips_func, reduce=True, return_per_frame=False, normalize=False):
    """LPIPS perceptual distance between real and fake videos/images.

    3-D (C, H, W) and 4-D (T, C, H, W) inputs are promoted to 5-D
    (B, T, C, H, W) before evaluation.

    :param real: ground-truth tensor.
    :param fake: generated tensor (same shape).
    :param lpips_func: callable LPIPS model expecting inputs in [-1, 1].
    :param reduce: if True, return the mean over all frames.
    :param return_per_frame: if True, additionally return {frame index -> lpips}.
    :param normalize: if True, map the inputs to [-1, 1] first — from [0, 255]
        when values exceed 1, otherwise from [0, 1].
    :return: lpips value(s); a (value, per-frame dict) tuple when
        return_per_frame is True.
    """
    if real.dim() == 3:
        real, fake = real[None,None], fake[None,None]
    elif real.dim() == 4:
        real, fake = real[None], fake[None]
    if normalize:
        if fake.max() > 1:
            # uint8-style [0, 255] input
            fake = (fake.to(torch.float) / 127.5) - 1.
            real = (real.to(torch.float) / 127.5) -1.
        else:
            # [0, 1] input
            real = (real * 2.) - 1.
            fake = (fake * 2.) - 1.
    # flatten batch and time axes so all frames are scored in one call
    lpips_batch = lpips_func(real.reshape(-1,*real.shape[2:]),fake.reshape(-1,*fake.shape[2:])).squeeze().cpu().numpy()
    if return_per_frame:
        lpips_per_frame = {i: lpips_func(real[:,i],fake[:,i]).squeeze().cpu().numpy() for i in range(real.shape[1])}
    if reduce:
        if return_per_frame:
            lpips_pf_reduced = {key: lpips_per_frame[key].mean() for key in lpips_per_frame}
            return lpips_batch.mean(), lpips_pf_reduced
        else:
            return lpips_batch.mean()
    if return_per_frame:
        return lpips_batch, lpips_per_frame
    else:
        return lpips_batch
def mean_cov(features):
    """Return the per-dimension mean vector and covariance matrix of ``features``.

    :param features: (N x D) array with one sample per row.
    :return: tuple (mean vector of length D, D x D covariance matrix).
    """
    return np.mean(features, axis=0), np.cov(features, rowvar=False)
def metric_fid(real_features, fake_features, eps=1e-6):
    """Frechet Inception Distance between two sets of feature vectors.

    Taken and adapted from https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py

    :param real_features: (N x D) ndarray, or list of arrays, of real-data features.
    :param fake_features: (N x D) ndarray, or list of arrays, of generated features.
    :param eps: diagonal offset applied when the covariance product is near-singular.
    :return: scalar FID value.
    """
    if not isinstance(real_features, np.ndarray):
        real_features = np.concatenate(real_features, axis=0)
        fake_features = np.concatenate(fake_features, axis=0)

    mu1, cov1 = mean_cov(real_features)
    mu2, cov2 = mean_cov(fake_features)

    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(cov1), np.atleast_2d(cov2)

    assert (
        mu1.shape == mu2.shape
    ), "Training and test mean vectors have different lengths"
    assert (
        sigma1.shape == sigma2.shape
    ), "Training and test covariances have different dimensions"

    diff = mu1 - mu2

    # matrix square root of the covariance product; may be numerically singular
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        msg = f"fid calculation produces singular product; adding {eps} to diagonal of cov estimates"
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # numerical error might introduce a slight imaginary component
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            m = np.max(np.abs(covmean.imag))
            raise ValueError(f"Imaginary component {m}")
        covmean = covmean.real

    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
def compute_fvd(real_videos,fake_videos, device,logger):
    """Compute the Frechet Video Distance between batches of real and fake videos.

    Uses the TF1-compat I3D embedding pipeline from utils.frechet_video_distance:
    videos are embedded batch by batch, and the FVD between the pooled
    embeddings is evaluated in a second graph run.

    :param real_videos: iterable of uint8 video batches (one feed per batch),
        all with the shape of real_videos[0].
    :param fake_videos: iterable of generated video batches, same shapes.
    :param device: GPU index; the matching TF device is made exclusively visible.
    :param logger: logger used for progress/result messages.
    :return: the FVD value.
    """
    import silence_tensorflow.auto
    import tensorflow.compat.v1 as tf
    from utils.frechet_video_distance import preprocess,Embedder,calculate_fvd
    # required for fvd computation
    # config = tf.ConfigProto()
    # config.gpu_options.visible_device_list = f"{device}"
    # restrict TF to the requested physical GPU; it then appears as /gpu:0
    devs = tf.config.experimental.get_visible_devices("GPU")
    target_dev = [d for d in devs if d.name.endswith(str(device))][0]
    tf.config.experimental.set_visible_devices(target_dev, 'GPU')
    logger.info("Compute fvd score.")
    #dev = f"/gpu:{device}"
    logger.info(f"using device {device}")
    with tf.device("/gpu:0"):
        with tf.Graph().as_default():
            # construct graph
            sess = tf.Session()
            input_shape = real_videos[0].shape
            input_real = tf.placeholder(dtype=tf.uint8, shape=input_shape)
            input_fake = tf.placeholder(dtype=tf.uint8, shape=input_shape)
            real_pre = preprocess(input_real, (224, 224))
            emb_real = Embedder(real_pre)
            embed_real = emb_real.create_id3_embedding(real_pre)
            fake_pre = preprocess(input_fake, (224, 224))
            emb_fake = Embedder(fake_pre)
            embed_fake = emb_fake.create_id3_embedding(fake_pre)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())

            real, fake = [], []
            for rv, fv in tqdm(zip(real_videos, fake_videos)):
                # real_batch = ((rv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                # fake_batch = ((fv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                # real_batch = ((rv + 1.) * 127.5).cpu().numpy()
                # fake_batch = ((fv + 1.) * 127.5).cpu().numpy()
                feed_dict = {input_real: rv, input_fake: fv}
                # NOTE(review): `r` receives the *fake* embedding and is
                # appended to `real` (and vice versa). FVD is symmetric in its
                # two arguments, so the value is unaffected — but the naming is
                # swapped; confirm before extending this code.
                r, f = sess.run([embed_fake, embed_real], feed_dict)
                real.append(r)
                fake.append(f)
            print('Compute FVD score')
            real = np.concatenate(real, axis=0)
            fake = np.concatenate(fake, axis=0)
            # second run: feed the pooled embeddings into the FVD op
            embed_real = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            embed_fake = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            result = calculate_fvd(embed_real, embed_fake)
            feed_dict = {embed_real: real, embed_fake: fake}
            fvd_val = sess.run(result, feed_dict)
            sess.close()
    logger.info(f"Results of fvd computation: fvd={fvd_val}")
    # for being sure
    return fvd_val
if __name__ == "__main__":
    # manual smoke test: two random image-like tensors with a few pinned values
    # (no metric is actually invoked here)
    z, o = torch.rand((1080, 720, 3)), torch.rand((1080, 720, 3))
    o[0, 0, 0], o[1, 0, 0] = 0, 1
    z[0, 0, 0], z[1, 0, 0] = 0, 1
| 12,279 | 33.985755 | 153 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/eval_pretrained.py | import argparse
from os import path
import yaml
import os
from experiments import select_experiment
def create_dir_structure(model_name, base_dir):
    """Create (and return) the standard per-model directory layout under ``base_dir``.

    The layout consists of the subdirectories "ckpt", "config", "generated"
    and "log" below ``base_dir/model_name``. If the DATAPATH environment
    variable is set, it is prepended to every path.

    :param model_name: name of the model; used as the directory name.
    :param base_dir: root directory for all models.
    :return: dict mapping subdir name -> path of the created directory.
    """
    subdirs = ["ckpt", "config", "generated", "log"]
    structure = {subdir: path.join(base_dir, model_name, subdir) for subdir in subdirs}
    # BUGFIX: apply the optional DATAPATH prefix *before* creating the
    # directories; previously the unprefixed paths were created while the
    # prefixed paths that get returned might not exist.
    if "DATAPATH" in os.environ:
        structure = {subdir: os.environ["DATAPATH"] + structure[subdir] for subdir in structure}
    for s in structure:
        os.makedirs(structure[s], exist_ok=True)
    return structure
def load_parameters(model_name, base_dir):
    """Restore the stored run configuration of a model and build its directory structure.

    :param model_name: name of the model whose config should be restored.
    :param base_dir: root directory containing all model directories.
    :return: tuple (config dict, directory-structure dict).
    :raises FileNotFoundError: if no saved config.yaml exists for the model.
    """
    dir_structure = create_dir_structure(model_name, base_dir)
    saved_config = path.join(dir_structure["config"], "config.yaml")
    if not path.isfile(saved_config):
        raise FileNotFoundError("No saved config file found but model is intended to be restarted. Aborting....")
    with open(saved_config, "r") as f:
        cdict = yaml.load(f, Loader=yaml.FullLoader)
    return cdict, dir_structure,
if __name__ == '__main__':
    # batch-evaluate all models listed in config/model_names.txt
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--base_dir', required=True,
                        type=str, help='the base directory, where all logs, configs, checkpoints and evaluation results will be stored.')
    parser.add_argument("--gpu", type=int, required=True, help="The target device.")
    parser.add_argument("--mode", default="metrics", type=str, choices=["metrics", "fvd"],
                        help="The mode in which the test-method should be executed.")
    parser.add_argument("--metrics_on_patches", default=False, action="store_true",
                        help="Whether to run evaluation on patches (if available or not).")
    parser.add_argument("--best_ckpt", default=False, action="store_true",
                        help="Whether to use the best ckpt as measured by LPIPS (otherwise, latest_ckpt is used)")
    args = parser.parse_args()

    # model names are listed one per line; lines starting with '#' are disabled
    with open("config/model_names.txt", "r") as f:
        model_names = f.readlines()
    model_names = [m for m in model_names if not m.startswith("#")]
    gpu = args.gpu
    for model in model_names:
        model = model.rstrip()
        print(f"Evaluate model : {model}")
        # restore the stored config and patch it into test mode for this run
        cdict, dirs = load_parameters(model, args.base_dir)
        cdict["testing"].update({"mode":args.mode})
        cdict["general"]["mode"] = "test"
        cdict["testing"].update({"best_ckpt": args.best_ckpt})
        cdict["testing"]["metrics_on_patches"] = args.metrics_on_patches
        cdict["general"]["restart"] = True
        experiment = select_experiment(cdict, dirs, args.gpu)
        try:
            experiment.test()
        except FileNotFoundError as e:
            # a missing checkpoint/config should not abort the whole batch
            print(e)
| 2,683 | 38.470588 | 137 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.